diff --git "a/1227.jsonl" "b/1227.jsonl" new file mode 100644--- /dev/null +++ "b/1227.jsonl" @@ -0,0 +1,414 @@ +{"seq_id": "465590939", "text": "# -*- coding: utf-8 -*-\n\nfrom zlapp.load import zl_app\nfrom flask import Module\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import request\nfrom flask import flash\nfrom flask import render_template\nfrom flask.ext.login import current_user\nfrom flask.ext.login import login_required\nfrom zlapp.load import db\nfrom zlapp.models.comments_model import CommentsInfo\nfrom zlapp.models.posts_model import PostsInfo\nfrom zlapp.forms.posts_forms import PostsForm\nfrom zlapp.forms.comments_forms import CommentsForm\n\nposts = Module(__name__)\n\n\n@zl_app.route('/posts/listposts/', methods=['GET', 'POST'])\n@login_required\ndef list_posts():\n list_posts = PostsInfo.query.filter_by().all()\n return render_template('admin/posts_list.html', list_posts=list_posts)\n\n\n@zl_app.route('/posts/listmyposts/', methods=['GET', 'POST'])\n@login_required\ndef list_myposts():\n list_posts = PostsInfo.query.filter_by(user_id=current_user.id).all()\n return render_template('admin/posts_mylist.html', list_posts=list_posts)\n\n\n@zl_app.route('/posts/addposts/', methods=['GET', 'POST'])\n@login_required\ndef add_posts():\n posts_form = PostsForm()\n if request.method == 'POST':\n if posts_form.validate:\n posts_info = PostsInfo()\n posts_form.populate_obj(posts_info)\n posts_info.user_id = current_user.id\n db.session.add(posts_info)\n db.session.commit()\n flash(u'帖子发表成功。')\n else:\n flash(u'帖子发表失败。')\n return render_template('admin/posts_add.html', form=posts_form)\n\n@zl_app.route('/posts/deleteposts//', methods=['GET', 'POST'])\n@login_required\ndef delete_posts(posts_id):\n CommentsInfo.query.filter_by(posts_id=posts_id).delete()\n PostsInfo.query.filter_by(id=posts_id).delete()\n flash(u'帖子删除成功。')\n return redirect(url_for('list_posts'))\n\n@zl_app.route('/posts/infoposts//', methods=['GET', 'POST'])\n@login_required\ndef info_posts(posts_id):\n comments_form = CommentsForm()\n if request.method == 'POST' and comments_form.validate:\n comments_info = CommentsInfo()\n comments_form.populate_obj(comments_info)\n comments_info.posts_id = posts_id\n comments_info.user_id = current_user.id\n db.session.add(comments_info)\n db.session.commit()\n flash(u'回复成功。')\n\n info_posts = PostsInfo.query.filter_by(id=posts_id).first()\n list_comments = CommentsInfo.query.filter_by(posts_id=posts_id).all()\n return render_template('admin/posts_info.html', form=comments_form, info_posts=info_posts, list_comments=list_comments)\n", "sub_path": "zlapp/views/posts_view.py", "file_name": "posts_view.py", "file_ext": "py", "file_size_in_byte": 2656, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Module", "line_number": 18, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query.filter_by", "line_number": 24, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query", "line_number": 24, "usage_type": "attribute"}, {"api_name": "zlapp.models.posts_model.PostsInfo", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app.route", "line_number": 21, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 22, "usage_type": "name"}, {"api_name": 
"zlapp.models.posts_model.PostsInfo.query.filter_by", "line_number": 31, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "zlapp.models.posts_model.PostsInfo", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.ext.login.current_user.id", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app.route", "line_number": 28, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 29, "usage_type": "name"}, {"api_name": "zlapp.forms.posts_forms.PostsForm", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "zlapp.models.posts_model.PostsInfo", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user.id", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 43, "usage_type": "name"}, {"api_name": "zlapp.load.db.session.add", "line_number": 44, "usage_type": "call"}, {"api_name": "zlapp.load.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "zlapp.load.db", "line_number": 44, "usage_type": "name"}, {"api_name": "zlapp.load.db.session.commit", "line_number": 45, "usage_type": "call"}, {"api_name": "zlapp.load.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "zlapp.load.db", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app.route", "line_number": 35, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 36, "usage_type": "name"}, {"api_name": "zlapp.models.comments_model.CommentsInfo.query.filter_by", "line_number": 54, "usage_type": "call"}, {"api_name": "zlapp.models.comments_model.CommentsInfo.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "zlapp.models.comments_model.CommentsInfo", "line_number": 54, "usage_type": "name"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query.filter_by", "line_number": 55, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query", "line_number": 55, "usage_type": "attribute"}, {"api_name": "zlapp.models.posts_model.PostsInfo", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 57, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app.route", "line_number": 51, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 52, "usage_type": "name"}, {"api_name": "zlapp.forms.comments_forms.CommentsForm", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 63, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "zlapp.models.comments_model.CommentsInfo", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.ext.login.current_user.id", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.ext.login.current_user", "line_number": 67, "usage_type": "name"}, {"api_name": "zlapp.load.db.session.add", "line_number": 68, "usage_type": "call"}, {"api_name": "zlapp.load.db.session", "line_number": 68, "usage_type": "attribute"}, {"api_name": "zlapp.load.db", "line_number": 68, "usage_type": "name"}, {"api_name": "zlapp.load.db.session.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "zlapp.load.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "zlapp.load.db", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 70, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query.filter_by", "line_number": 72, "usage_type": "call"}, {"api_name": "zlapp.models.posts_model.PostsInfo.query", "line_number": 72, "usage_type": "attribute"}, {"api_name": "zlapp.models.posts_model.PostsInfo", "line_number": 72, "usage_type": "name"}, {"api_name": "zlapp.models.comments_model.CommentsInfo.query.filter_by", "line_number": 73, "usage_type": "call"}, {"api_name": "zlapp.models.comments_model.CommentsInfo.query", "line_number": 73, "usage_type": "attribute"}, {"api_name": "zlapp.models.comments_model.CommentsInfo", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app.route", "line_number": 59, "usage_type": "call"}, {"api_name": "zlapp.load.zl_app", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "442928910", "text": "import yaml\nimport os\n\nclass Config(object):\n \"\"\"\"\"\"\n def __init__(self):\n if os.path.isfile('eval_config.yaml'):\n with open('eval_config.yaml') as f:\n config = f.read()\n self._config = yaml.load(config)\n else:\n self._config = {}\n def get(self, attr):\n return self._config.get(attr, None)\n\n\nconfig = Config()", "sub_path": "val/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.isfile", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "20492029", "text": "from django.urls import path\n\n\nfrom cart.api import views\n\n\napp_name = \"cart_api\"\n\nurlpatterns = [\n path('create/', views.CratItemCreateApiView.as_view(), name='cart_create'),\n path('cartitem_list/', views.CratItemListApiView.as_view(), name='cartitem_list'),\n path('checkout/', views.CartCheckOutApiView.as_view(), name='cart_checkout'),\n path('list/', views.CartListApiView.as_view(), name='cart_list'),\n path('detail//', views.CartDetailApiView.as_view(), name='cart_detail'),\n]\n", "sub_path": "cart/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "cart.api.views.CratItemCreateApiView.as_view", "line_number": 10, 
"usage_type": "call"}, {"api_name": "cart.api.views.CratItemCreateApiView", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cart.api.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "cart.api.views.CratItemListApiView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "cart.api.views.CratItemListApiView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cart.api.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "cart.api.views.CartCheckOutApiView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "cart.api.views.CartCheckOutApiView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cart.api.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "cart.api.views.CartListApiView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "cart.api.views.CartListApiView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cart.api.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "cart.api.views.CartDetailApiView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "cart.api.views.CartDetailApiView", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cart.api.views", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "154294569", "text": "import requests\nimport json\nimport sys\n\ndef get_client_name():\n\tclient_path = \"/etc/sensu/conf.d/client.json\"\n\tdata = json.load(open(client_path))\n\treturn data['client']['name']\n\ndef get_max_threads(local):\n\tparam = \"maxThreads\"\n\taddr = (\"http://%s/jolokia/read/jboss.web:type=ThreadPool,name=ajp-0.0.0.0-8009/%s\") % (local, param)\n\treq = requests.get(addr)\n\treturn req.json()['value']\n\ndef get_current_threads(local):\n\tparam = \"currentThreadsCount\"\n\taddr = (\"http://%s/jolokia/read/jboss.web:type=ThreadPool,name=ajp-0.0.0.0-8009/%s\") % (local, param)\n\treq = requests.get(addr)\n\treturn req.json()['value']\n\ndef get_current_threads_busy(local):\n\tparam = \"currentThreadsBusy\"\n\taddr = (\"http://%s/jolokia/read/jboss.web:type=ThreadPool,name=ajp-0.0.0.0-8009/%s\") % (local, param)\n\treq = requests.get(addr)\n\treturn req.json()['value']\n\ndef main():\n\tlocal = sys.argv[1]\n\tparam = sys.argv[2]\n\thost = get_client_name()\n\n\tif param == \"maxThreads\":\n\t\tprint (\"%s %s %s\") % (host, param, get_max_threads(local))\n\telif param == \"currentThreadsCount\":\n\t\tprint (\"%s %s %s\") % (host, param, get_current_threads_busy(local))\n\nif __name__ == '__main__':\n\tmain()\n", "sub_path": "plugins/metric-thread.py", "file_name": "metric-thread.py", "file_ext": "py", "file_size_in_byte": 1146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "299527229", "text": "import torch\nimport 
torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.linear import Linear\n#from torch.nn.modules.distance import CosineDistance\n\n\nclass CustomLinear(Linear):\n r\"\"\"Applies a linear transformation to the incoming data: :math:`y = xA^T + b`\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n bias: If set to False, the layer will not learn an additive bias.\n Default: ``True``\n\n Shape:\n - Input: :math:`(N, *, \\text{in\\_features})` where :math:`*` means any number of\n additional dimensions\n - Output: :math:`(N, *, \\text{out\\_features})` where all but the last dimension\n are the same shape as the input.\n\n Attributes:\n weight: the learnable weights of the module of shape\n :math:`(\\text{out\\_features}, \\text{in\\_features})`. The values are\n initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.\n If :attr:`bias` is ``True``, the values are initialized from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n\n Examples::\n\n >>> m = nn.Linear(20, 30)\n >>> input = torch.randn(128, 20)\n >>> output = m(input)\n >>> print(output.size())\n torch.Size([128, 30])\n \"\"\"\n __constants__ = ['bias']\n\n def __init__(self, in_features, out_features, bias=False):\n super(Linear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def forward(self, input):\n return F.linear(self.weight, input, self.bias)\n\n \n\nclass BasicModel(torch.nn.Module):\n def __init__(self, ngenes, nfeatures, embedding_size=100):\n super(BasicModel, self).__init__()\n \n #Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None)[S\n self.layer_e = CustomLinear(ngenes, embedding_size)\n self.layer_f = CustomLinear(nfeatures, embedding_size)\n torch.nn.init.normal(self.layer_e.weight, mean=0.0, std=0.02)\n \n torch.nn.init.normal(self.layer_f.weight, mean=0.0, std=0.02)\n \n \n #self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)\n\n def forward(self, e, f):\n h1 = self.layer_e(e) # \n h2 = self.layer_f(f) #\n #h1 = torch.nn.Sigmoid()(h1)\n #h2 = torch.nn.Sigmoid()(h2)\n #h1 = torch.sum(h1, dim=1)\n #h2 = torch.sum(h2, dim=1)\n #return h1\n output = F.cosine_similarity(h1, h2, dim=0)\n #x = torch.cat([h1, h2], 1)\n #h = self.hidden_layer(x)\n #out = self.output_layer(h)\n return output", "sub_path": "embeddings/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 3206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.nn.modules.linear.Linear", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.modules.linear.Linear", "line_number": 45, "usage_type": "argument"}, {"api_name": "torch.nn.parameter.Parameter", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.parameter.Parameter", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 50, "usage_type": "call"}, {"api_name": 
"torch.nn.functional.linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn.init.normal", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.nn.init.normal", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.cosine_similarity", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "276332576", "text": "from google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nmain_page = \"\"\"\n\n\n \n f-melody\n \n \n \n \n \n \n \n\"\"\" % \"/script/loader.js\"\n \n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/html'\n self.response.out.write(main_page)\n\napplication = webapp.WSGIApplication([('/', MainPage)], debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "f-melody/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 19, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 19, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.WSGIApplication", "line_number": 24, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp", "line_number": 24, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "314277718", "text": "import requests\nfrom monitor.test import MyHTMLParser\n\n\nurl = 'http://www.panda.tv/search'\nkey = '周二珂'\nargs = {'key': key}\nheader = {'user-agent': '\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:42.0) Gecko/20100101 Firefox/42.0\"'}\n# response = requests.request('get', url, params=args, headers=header)\n# print(response.status_code)\n# print(response.text)\n# parser = MyHTMLParser()\n# parser.feed(response.text)\n\ncookie = {'cookies': '\"__guid=96554777.4116491961764843500.1449752660757.6753; '\n 'monitor_count=5; '\n 'Hm_lvt_204071a8b1d0b2a04c782c44b88eb996=1449752661;'\n 'Hm_lpvt_204071a8b1d0b2a04c782c44b88eb996=1449752773\"'}\nresponse = requests.request('get', url, params=args, headers=header, cookies=cookie)\nprint(response.cookies.values())\nprint(response.headers)\nparser = MyHTMLParser()\nparser.feed(response.text)\n", "sub_path": "monitor/test3.py", "file_name": "test3.py", "file_ext": "py", "file_size_in_byte": 884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.request", "line_number": 19, "usage_type": "call"}, {"api_name": "monitor.test.MyHTMLParser", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "643688567", "text": "from functools import lru_cache\n \nsteps_one_and_two = {\n 'A': ['G', 'BD'], \n 'B': ['JLG', 'EFA'], \n 'C': ['KMEH', 'G'], \n 'D': ['L', 'FHA'], \n 'E': ['PCL', 'IKB'], \n 'F': ['PQJ', 'KMDB'], \n 'G': ['RABKN', 'LC'], \n 'H': ['QCL', 'MND'], \n 'I': ['P', 'OE'], \n 'J': ['SBF', 'P'], \n 'K': ['TCGQ', 'OFE'], \n 'L': ['SUDBEHOR', 
'PQG'], \n 'M': ['TCP', 'RHF'], \n 'N': ['GQ', 'RH'], \n 'O': ['LT', 'SKI'], \n 'P': ['VFEIMU', 'TLJ'], \n 'Q': ['VHFKNS', 'TL'], \n 'R': ['GLT', 'UNM'], \n 'S': ['LJQ', 'VO'], \n 'T': ['MKOR', 'QP'], \n 'U': ['LP', 'VR'], \n 'V': ['QP', 'US']\n}\n\n@lru_cache()\ndef main(k, n, step=1, value=0):\n \"\"\"\n The function returns the unique number of strings possible after n steps in\n depending on the selected starting point k.\n \"\"\"\n combinations_for_step = steps_one_and_two[k][(step + 1) % 2]\n if step == n: \n return len(combinations_for_step)\n for letter in combinations_for_step:\n value += main(k=letter, n=n, step=step+1)\n return value\n\nargv = input()\nk, n = argv.split(',')\nQ = main(k=k, n=int(n[:-1])) \nprint(Q)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "functools.lru_cache", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "196978616", "text": "import data\nimport view\n\nis_running = True\n\nwhile is_running:\n view.show_menu()\n user_input = input('Choose an option: ')\n view.divider()\n\n if user_input == '1':\n print('Add Task')\n new_task_description = input(\"What is the description? \")\n new_task_priority = input(\"What is the priority (low, med, high)? \")\n new_task_priority = data.convert_wording_to_priority_int(new_task_priority)\n\n new_task_description = data.Task(new_task_description, new_task_priority)\n \n print(\"Task was added successfully\")\n elif user_input == '2':\n index_of_task_to_delete = view.select_task('remove')\n data.tasks.pop(index_of_task_to_delete)\n print('Task removed successfully')\n elif user_input == '3':\n view.show_tasks()\n elif user_input == '4':\n index_of_task_to_update_priority = view.select_task('update priority')\n new_priority = input(\"What is the new priority (low,med,high)? 
\")\n data.tasks[index_of_task_to_update_priority][\n \"priority\"] = data.convert_wording_to_priority_int(new_priority)\n print(\"The task's priority successfully\")\n elif user_input == 'q':\n is_running = False\n", "sub_path": "Projects/Python/Week 1/ToDoApplication/ToDoApp2/todoapp.py", "file_name": "todoapp.py", "file_ext": "py", "file_size_in_byte": 1215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "view.show_menu", "line_number": 7, "usage_type": "call"}, {"api_name": "view.divider", "line_number": 9, "usage_type": "call"}, {"api_name": "data.convert_wording_to_priority_int", "line_number": 15, "usage_type": "call"}, {"api_name": "data.Task", "line_number": 17, "usage_type": "call"}, {"api_name": "view.select_task", "line_number": 21, "usage_type": "call"}, {"api_name": "data.tasks.pop", "line_number": 22, "usage_type": "call"}, {"api_name": "data.tasks", "line_number": 22, "usage_type": "attribute"}, {"api_name": "view.show_tasks", "line_number": 25, "usage_type": "call"}, {"api_name": "view.select_task", "line_number": 27, "usage_type": "call"}, {"api_name": "data.tasks", "line_number": 29, "usage_type": "attribute"}, {"api_name": "data.convert_wording_to_priority_int", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "76178190", "text": "from ..dtml import DTMLRegexRewriter\nfrom ..main import main\nfrom ..pagetemplates import PTParserRewriter\nimport pathlib\nimport pkg_resources\nimport pytest\nimport shutil\n\nFIXTURE_DIR = pkg_resources.resource_filename(\n 'gocept.template_rewrite.tests', 'fixture')\n\n\n@pytest.fixture('function')\ndef files(tmpdir):\n \"\"\"Create a copy of the fixture directory in a temporary directory.\"\"\"\n dir = str(tmpdir.join('fixture'))\n shutil.copytree(FIXTURE_DIR, dir)\n yield pathlib.Path(dir)\n\n\ndef test_main__main__1(files, caplog):\n \"\"\"It converts all files in the given directory.\"\"\"\n main([str(files)])\n res_files = [x.name for x in files.iterdir()]\n assert ['README.txt',\n 'broken.html',\n 'broken.pt',\n 'one.pt',\n 'two.dtml'] == sorted(res_files)\n assert caplog.text.count('Processing') == 4\n assert caplog.text.count('Parsing error') == 1\n\n\ndef test_main__main__2(files):\n \"\"\"It does not touch the original files on `--keep-files`.\"\"\"\n main([str(files), '--keep-files'])\n res_files = [x.name for x in files.iterdir()]\n assert [\n 'README.txt',\n 'broken.html',\n 'broken.html.out',\n 'broken.pt',\n 'broken.pt.out',\n 'one.pt',\n 'one.pt.out',\n 'two.dtml',\n 'two.dtml.out',\n ] == sorted(res_files)\n # Source files are not changed:\n for file in pathlib.Path(FIXTURE_DIR).iterdir():\n source = file.read_text()\n dest = files.joinpath(file.name).read_text()\n assert dest == source\n\n\ndef test_main__main__3(files, caplog):\n \"\"\"It reports parsing errors on `--only-check-syntax`.\"\"\"\n main([str(files), '--only-check-syntax'])\n res_files = [x.name for x in files.iterdir()]\n assert ['README.txt',\n 'broken.html',\n 'broken.pt',\n 'one.pt',\n 'two.dtml'] == sorted(res_files)\n assert caplog.text.count('Processing') == 4\n assert caplog.text.count('Parsing error') == 1\n # Source files are not changed:\n for file in pathlib.Path(FIXTURE_DIR).iterdir():\n source = file.read_text()\n dest = files.joinpath(file.name).read_text()\n assert dest == source\n\n\ndef test_main__main__4(files, caplog):\n \"\"\"It accepts a list of files as argument.\"\"\"\n main([str(files / 'broken.pt'), str(files / 'one.pt')])\n assert 
caplog.text.count('Processing') == 2\n assert caplog.text.count('Parsing error') == 1\n\n\ndef test_main__main__5(files, mocker):\n \"\"\"It treats all files as PageTemplate on `--force=pt`.\"\"\"\n mocker.spy(DTMLRegexRewriter, '__call__')\n mocker.spy(PTParserRewriter, '__call__')\n main([str(files), '--force=pt'])\n assert DTMLRegexRewriter.__call__.call_count == 0\n assert PTParserRewriter.__call__.call_count == 5\n\n\ndef test_main__main__6(files, mocker):\n \"\"\"It treats all files as DocumentTemplate on `--force=dtml`.\"\"\"\n mocker.spy(DTMLRegexRewriter, '__call__')\n mocker.spy(PTParserRewriter, '__call__')\n main([str(files), '--force=dtml'])\n assert DTMLRegexRewriter.__call__.call_count == 5\n assert PTParserRewriter.__call__.call_count == 0\n\n\ndef test_main__PTParserRewriter__1(files, mocker):\n \"\"\"It skips rewrite of a file without `tal:` in content.\"\"\"\n mocker.spy(PTParserRewriter, 'rewrite_zpt')\n main([str(files / 'broken.html'), str(files / 'one.pt')])\n # broken.html is not rewritten\n assert PTParserRewriter.rewrite_zpt.call_count == 1\n", "sub_path": "src/gocept/template_rewrite/tests/test_main.py", "file_name": "test_main.py", "file_ext": "py", "file_size_in_byte": 3447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pkg_resources.resource_filename", "line_number": 9, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}, {"api_name": "main.main", "line_number": 23, "usage_type": "call"}, {"api_name": "main.main", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "call"}, {"api_name": "main.main", "line_number": 58, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "call"}, {"api_name": "main.main", "line_number": 76, "usage_type": "call"}, {"api_name": "dtml.DTMLRegexRewriter", "line_number": 83, "usage_type": "argument"}, {"api_name": "pagetemplates.PTParserRewriter", "line_number": 84, "usage_type": "argument"}, {"api_name": "main.main", "line_number": 85, "usage_type": "call"}, {"api_name": "dtml.DTMLRegexRewriter.__call__", "line_number": 86, "usage_type": "attribute"}, {"api_name": "dtml.DTMLRegexRewriter", "line_number": 86, "usage_type": "name"}, {"api_name": "pagetemplates.PTParserRewriter.__call__", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pagetemplates.PTParserRewriter", "line_number": 87, "usage_type": "name"}, {"api_name": "dtml.DTMLRegexRewriter", "line_number": 92, "usage_type": "argument"}, {"api_name": "pagetemplates.PTParserRewriter", "line_number": 93, "usage_type": "argument"}, {"api_name": "main.main", "line_number": 94, "usage_type": "call"}, {"api_name": "dtml.DTMLRegexRewriter.__call__", "line_number": 95, "usage_type": "attribute"}, {"api_name": "dtml.DTMLRegexRewriter", "line_number": 95, "usage_type": "name"}, {"api_name": "pagetemplates.PTParserRewriter.__call__", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pagetemplates.PTParserRewriter", "line_number": 96, "usage_type": "name"}, {"api_name": "pagetemplates.PTParserRewriter", "line_number": 101, "usage_type": "argument"}, {"api_name": "main.main", "line_number": 102, "usage_type": "call"}, {"api_name": "pagetemplates.PTParserRewriter.rewrite_zpt", "line_number": 104, "usage_type": "attribute"}, 
{"api_name": "pagetemplates.PTParserRewriter", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "296155809", "text": "# pylint: disable=missing-module-docstring\nimport numpy as np\nimport pandas as pd\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.spatial.distance import squareform\nfrom sklearn.covariance import OAS\nfrom mlfinlab.portfolio_optimization.returns_estimators import ReturnsEstimation\nfrom mlfinlab.portfolio_optimization.risk_metrics import RiskMetrics\n\n\nclass HierarchicalRiskParity:\n \"\"\"\n This class implements the Hierarchical Risk Parity algorithm mentioned in the following paper: `López de Prado, Marcos,\n Building Diversified Portfolios that Outperform Out-of-Sample (May 23, 2016). Journal of Portfolio Management,\n 2016 `_; The code is reproduced with modification from his book:\n Advances in Financial Machine Learning, Chp-16\n\n By removing exact analytical approach to the calculation of weights and instead relying on an approximate\n machine learning based approach (hierarchical tree-clustering), Hierarchical Risk Parity produces weights which are stable to\n random shocks in the stock-market. Moreover, previous algorithms like CLA involve the inversion of covariance matrix which is\n a highly unstable operation and tends to have major impacts on the performance due to slight changes in the covariance matrix.\n By removing dependence on the inversion of covariance matrix completely, the Hierarchical Risk Parity algorithm is fast,\n robust and flexible.\n \"\"\"\n\n def __init__(self):\n self.weights = list()\n self.seriated_correlations = None\n self.seriated_distances = None\n self.ordered_indices = None\n self.clusters = None\n self.returns_estimator = ReturnsEstimation()\n self.risk_metrics = RiskMetrics()\n\n @staticmethod\n def _tree_clustering(correlation, method='single'):\n \"\"\"\n Perform the traditional heirarchical tree clustering.\n\n :param correlation: (np.array) correlation matrix of the assets\n :param method: (str) the type of clustering to be done\n :return: distance matrix and clusters\n \"\"\"\n\n distances = np.sqrt((1 - correlation).round(5) / 2)\n clusters = linkage(squareform(distances.values), method=method)\n return distances, clusters\n\n def _quasi_diagnalization(self, num_assets, curr_index):\n \"\"\"\n Rearrange the assets to reorder them according to hierarchical tree clustering order.\n\n :param num_assets: (int) the total number of assets\n :param curr_index: (int) current index\n :return: (list) the assets rearranged according to hierarchical clustering\n \"\"\"\n\n if curr_index < num_assets:\n return [curr_index]\n\n left = int(self.clusters[curr_index - num_assets, 0])\n right = int(self.clusters[curr_index - num_assets, 1])\n\n return (self._quasi_diagnalization(num_assets, left) + self._quasi_diagnalization(num_assets, right))\n\n def _get_seriated_matrix(self, assets, distances, correlations):\n \"\"\"\n Based on the quasi-diagnalization, reorder the original distance matrix, so that assets within\n the same cluster are grouped together.\n\n :param assets: (list) list of asset names in the portfolio\n :param distances: (pd.Dataframe) distance values between asset returns\n :param correlations: (pd.Dataframe) correlations between asset returns\n :return: (np.array) re-arranged distance matrix based on tree clusters\n \"\"\"\n\n ordering = assets[self.ordered_indices]\n seriated_distances = distances.loc[ordering, ordering]\n seriated_correlations = correlations.loc[ordering, ordering]\n 
return seriated_distances, seriated_correlations\n\n @staticmethod\n def _get_inverse_variance_weights(covariance):\n \"\"\"\n Calculate the inverse variance weight allocations.\n\n :param covariance: (pd.Dataframe) covariance matrix of assets\n :return: (list) inverse variance weight values\n \"\"\"\n\n inv_diag = 1 / np.diag(covariance.values)\n parity_w = inv_diag * (1 / np.sum(inv_diag))\n return parity_w\n\n def _get_cluster_variance(self, covariance, cluster_indices):\n \"\"\"\n Calculate cluster variance.\n\n :param covariance: (pd.Dataframe) covariance matrix of assets\n :param cluster_indices: (list) list of asset indices for the cluster\n :return: (float) variance of the cluster\n \"\"\"\n\n cluster_covariance = covariance.iloc[cluster_indices, cluster_indices]\n parity_w = self._get_inverse_variance_weights(cluster_covariance)\n cluster_variance = self.risk_metrics.calculate_variance(covariance=cluster_covariance, weights=parity_w)\n return cluster_variance\n\n def _recursive_bisection(self, covariance, assets):\n \"\"\"\n Recursively assign weights to the clusters - ultimately assigning weights to the inidividual assets.\n\n :param covariance: (pd.Dataframe) the covariance matrix\n :param assets: (list) list of asset names in the portfolio\n \"\"\"\n\n self.weights = pd.Series(1, index=self.ordered_indices)\n clustered_alphas = [self.ordered_indices]\n\n while clustered_alphas:\n clustered_alphas = [cluster[start:end]\n for cluster in clustered_alphas\n for start, end in ((0, len(cluster) // 2), (len(cluster) // 2, len(cluster)))\n if len(cluster) > 1]\n\n for subcluster in range(0, len(clustered_alphas), 2):\n left_cluster = clustered_alphas[subcluster]\n right_cluster = clustered_alphas[subcluster + 1]\n\n # Get left and right cluster variances and calculate allocation factor\n left_cluster_variance = self._get_cluster_variance(covariance, left_cluster)\n right_cluster_variance = self._get_cluster_variance(covariance, right_cluster)\n alloc_factor = 1 - left_cluster_variance / (left_cluster_variance + right_cluster_variance)\n\n # Assign weights to each sub-cluster\n self.weights[left_cluster] *= alloc_factor\n self.weights[right_cluster] *= 1 - alloc_factor\n\n # Assign actual asset values to weight index\n self.weights.index = assets[self.ordered_indices]\n self.weights = pd.DataFrame(self.weights)\n self.weights = self.weights.T\n\n def plot_clusters(self, assets):\n \"\"\"\n Plot a dendrogram of the hierarchical clusters.\n\n :param assets: (list) list of asset names in the portfolio\n \"\"\"\n\n dendrogram_plot = dendrogram(self.clusters, labels=assets)\n return dendrogram_plot\n\n @staticmethod\n def _shrink_covariance(asset_returns):\n \"\"\"\n Regularise/Shrink the asset covariances.\n\n :param asset_returns: (pd.Dataframe) asset returns\n :return: (pd.Dataframe) shrinked asset returns covariances\n \"\"\"\n\n oas = OAS()\n oas.fit(asset_returns)\n shrinked_covariance = oas.covariance_\n return shrinked_covariance\n\n @staticmethod\n def _cov2corr(covariance):\n \"\"\"\n Calculate the correlations from asset returns covariance matrix.\n\n :param covariance: (pd.Dataframe) asset returns covariances\n :return: (pd.Dataframe) correlations between asset returns\n \"\"\"\n\n d_matrix = np.zeros_like(covariance)\n diagnoal_sqrt = np.sqrt(np.diag(covariance))\n np.fill_diagonal(d_matrix, diagnoal_sqrt)\n d_inv = np.linalg.inv(d_matrix)\n corr = np.dot(np.dot(d_inv, covariance), d_inv)\n corr = pd.DataFrame(corr, index=covariance.columns, columns=covariance.columns)\n return 
corr\n\n def allocate(self,\n asset_names,\n asset_prices=None,\n asset_returns=None,\n covariance_matrix=None,\n resample_by=None,\n use_shrinkage=False):\n # pylint: disable=invalid-name, too-many-branches\n \"\"\"\n Calculate asset allocations using HRP algorithm.\n\n :param asset_names: (list) a list of strings containing the asset names\n :param asset_prices: (pd.Dataframe) a dataframe of historical asset prices (daily close)\n indexed by date\n :param asset_returns: (pd.Dataframe/numpy matrix) user supplied matrix of asset returns\n :param covariance_matrix: (pd.Dataframe/numpy matrix) user supplied covariance matrix of asset returns\n :param resample_by: (str) specifies how to resample the prices - weekly, daily, monthly etc.. Defaults to\n None for no resampling\n :param use_shrinkage: (Boolean) specifies whether to shrink the covariances\n \"\"\"\n\n if asset_prices is None and asset_returns is None and covariance_matrix is None:\n raise ValueError(\"You need to supply either raw prices or returns or a covariance matrix of asset returns\")\n\n if asset_prices is not None:\n if not isinstance(asset_prices, pd.DataFrame):\n raise ValueError(\"Asset prices matrix must be a dataframe\")\n if not isinstance(asset_prices.index, pd.DatetimeIndex):\n raise ValueError(\"Asset prices dataframe must be indexed by date.\")\n\n # Calculate the returns if the user does not supply a returns dataframe\n if asset_returns is None and covariance_matrix is None:\n asset_returns = self.returns_estimator.calculate_returns(asset_prices=asset_prices, resample_by=resample_by)\n asset_returns = pd.DataFrame(asset_returns, columns=asset_names)\n\n # Calculate covariance of returns or use the user specified covariance matrix\n if covariance_matrix is None:\n if use_shrinkage:\n covariance_matrix = self._shrink_covariance(asset_returns=asset_returns)\n else:\n covariance_matrix = asset_returns.cov()\n cov = pd.DataFrame(covariance_matrix, index=asset_names, columns=asset_names)\n\n # Calculate correlation from covariance matrix\n corr = self._cov2corr(covariance=cov)\n\n # Step-1: Tree Clustering\n distances, self.clusters = self._tree_clustering(correlation=corr)\n\n # Step-2: Quasi Diagnalization\n num_assets = len(asset_names)\n self.ordered_indices = self._quasi_diagnalization(num_assets, 2 * num_assets - 2)\n self.seriated_distances, self.seriated_correlations = self._get_seriated_matrix(assets=asset_names,\n distances=distances,\n correlations=corr)\n\n # Step-3: Recursive Bisection\n self._recursive_bisection(covariance=cov, assets=asset_names)\n", "sub_path": "mlfinlab/portfolio_optimization/hrp.py", "file_name": "hrp.py", "file_ext": "py", "file_size_in_byte": 10981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mlfinlab.portfolio_optimization.returns_estimators.ReturnsEstimation", "line_number": 32, "usage_type": "call"}, {"api_name": "mlfinlab.portfolio_optimization.risk_metrics.RiskMetrics", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy.linkage", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 141, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy.dendrogram", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.covariance.OAS", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pandas.DatetimeIndex", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "637601545", "text": "from urllib.request import urlopen as uOpen, Request as uReq\nfrom bs4 import BeautifulSoup as soup, NavigableString\n\nuClient = uReq('https://bulbapedia.bulbagarden.net/w/index.php?title=Category:Pok%C3%A9mon_that_are_part_of_a_three-stage_evolutionary_line&pagefrom=Raichu+%28Pok%C3%A9mon%29#mw-pages', headers={'User-Agent': 'Magic Browser'})\nuCon = uOpen(uClient, None, 5)\npoke_page_html = uCon.read()\nuCon.close()\n\nps = soup(poke_page_html, 'html.parser')\nps = ps.find(attrs={'id': 'mw-content-text'}).find('div', attrs={'class': 'mw-category'})\n\n\nwith open('d.txt', 'a') as f:\n ps = ps.find_all('a')\n ps = [p.text.replace(' (Pokémon)', '\\n') for p in ps]\n for p in ps:\n f.write(p)\n", "sub_path": "evolution.py", "file_name": "evolution.py", "file_ext": "py", "file_size_in_byte": 722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib.request.Request", "line_number": 4, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "109004575", "text": "\"\"\"changed fields on pages.Template\n\nRevision ID: 4cd505f67b57\nRevises: 26349a2988c2\nCreate Date: 2014-07-30 17:36:18.457018\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4cd505f67b57'\ndown_revision = '26349a2988c2'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('templates', sa.Column('description', sa.Text(), nullable=True))\n op.drop_column('templates', 'is_base_template')\n op.drop_column('templates', 'filename')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('templates', sa.Column('filename', mysql.VARCHAR(length=255), nullable=True))\n op.add_column('templates', sa.Column('is_base_template', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True))\n op.drop_column('templates', 'description')\n ### end Alembic commands ###\n", "sub_path": "flask_cms/migrations/4cd505f67b57_changed_fields_on_pages_template.py", "file_name": "4cd505f67b57_changed_fields_on_pages_template.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 27, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.TINYINT", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "318989404", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n#%matplotlib inline\n\n\n# In[2]:\n\n\nDATA_DIR = '../data'\nfiles = os.listdir(DATA_DIR)\nprint(files)\ntraindata = pd.read_csv(os.path.join(DATA_DIR, files[0]))\nallkeys = pd.read_csv(os.path.join(DATA_DIR, files[1]))\nsample_submission = pd.read_csv(os.path.join(DATA_DIR, files[2]))\n\n\n# In[ ]:\n\n\nsample_submission = pd.read_csv(os.path.join(DATA_DIR, files[2]))\n\n\n# In[44]:\n\n\ndef get_time_avg(data):\n time_avg = data.mean(axis=1, skipna=True)\n time_avg[time_avg.isnull()] = 0\n return time_avg\n\ndef get_time_median(data):\n time_median = data.median(axis=1, skipna=True)\n time_median[time_median.isnull()] = 0\n return time_median\n\n\n# make prediction with median\n#time_avg_with_0 = pd.read_pickle('./output/time_avg_with_0')\n#median_all_pages = time_avg_with_0.median()\n#sample_submission['Visits'] = median_all_pages\n#sample_submission.to_csv('./output/sub_time_avg_with_0.gz', index=False, compression='gzip')\n\n\n\n\n#time_median = get_time_median(traindata)\n\n\n# In[47]:\n\n\n#time_median.to_pickle('./output/time_median_with_0')\n\n\n# In[ ]:\n\n\n# find index of page in traindata\n#for i in range(0,len(sample_submission),60):\n# keystring = 
'_'.join(allkeys['Page'].loc[i].split('_')[:-1])\n# sample_submission.loc[i:i+60,'Visits'] = float(time_median.loc[traindata['Page'] == keystring])\n\n# sample_submission.to_csv('./output/sub_sample_median_with_0.gz', index=False, compression='gzip')\n\n\n# In[ ]:\n\n\nif __name__ == '__main__':\n\ttime_median = pd.read_pickle('./output/time_median_with_0')\n\tfor i in tqdm(range(0,len(sample_submission),60)):\n\t\tkeystring = '_'.join(allkeys['Page'].loc[i].split('_')[:-1])\n\t\tsample_submission.loc[i:i+60,'Visits'] = float(time_median.loc[traindata['Page'] == keystring])\n\t\n\tsample_submission.to_csv('./output/sub_sample_median_with_0.gz', index=False, compression='gzip')\n\n\n\n", "sub_path": "explorative_models/global_average_pred.py", "file_name": "global_average_pred.py", "file_ext": "py", "file_size_in_byte": 1946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 80, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "501650970", "text": "import os, random, time, copy\nfrom skimage import io, transform\nimport numpy as np\nimport os.path as path\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler \nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\n\n\nfrom models.pixel_embedding_model import *\nfrom models.segm_basic_model import *\nfrom models.customized_loss import *\n\nclass cosSimLoss(nn.Module):\n def __init__(self, randNum=-100, device='cpu', margin=0.4,\n size_average=None, reduction='elementwise_mean'):\n super(cosSimLoss, self).__init__()\n self.weight = None\n self.size_average = size_average\n self.reduction = reduction \n self.randNum = randNum\n self.device = device\n self.randIdx_H = 0\n self.randIdx_W = 0\n self.margin = margin\n \n def forward(self, inputs, target): \n weightsFromNum = target.clone()\n weightsFromNum[weightsFromNum!=0] = 1\n weightsFromNum[weightsFromNum==0] = 0.05\n \n if self.randNum>0:\n inputs_size = inputs.size()\n tensor_way = len(inputs_size)\n self.randIdx_W = np.random.choice(inputs_size[-1], self.randNum, replace=False)\n self.randIdx_W = torch.from_numpy(self.randIdx_W).long().to(self.device)\n self.randIdx_H = np.random.choice(inputs_size[-2], self.randNum, replace=False)\n self.randIdx_H = 
torch.from_numpy(self.randIdx_H).long().to(self.device)\n\n inputs = self.rand_sample_pixels(inputs)\n target = self.rand_sample_pixels(target)\n weightsFromNum = self.rand_sample_pixels(weightsFromNum)\n \n \n inputs_NCM = inputs.view(inputs.size(0),inputs.size(1),-1) # NxCxHxW --> NxCxM, where M=H*W\n inputs_NMC = inputs_NCM.permute(0,2,1) # NxCxM --> NxMxC, permute axes \n cosSimMat = torch.matmul(inputs_NMC, inputs_NCM)\n cosSimMat = cosSimMat.clamp(-1,1)# torch.clamp(input, min, max, out=None)\n cosSimMat.add_(1).mul_(0.5)\n \n \n # findicator vector --> pair-wise binary matrix showing whether two points have the same label\n target_simMat = self.indicator_to_similarity_matrix(target)\n\n # generate per-pixel weight \n weightsFromNum = self.gen_per_pixel_weight_matrix(weightsFromNum)\n \n # computing the loss over cosSimMat and target_simMat (0: inter-obj, 1:inner-obj)\n cosSimMat = cosSimMat.view(cosSimMat.size(0),-1)\n target_simMat = target_simMat.view(target_simMat.size(0),-1)\n weightsFromNum = weightsFromNum.view(weightsFromNum.size(0),-1)\n \n totalNum = target_simMat.size(0)*target_simMat.size(1)+2\n posNum = torch.sum(target_simMat.view(-1))+1\n negNum = totalNum-posNum +1\n weight_neg = 1.0/negNum\n weight_pos = 1.0/posNum\n \n \n posPairLoss = torch.mul(1-cosSimMat, target_simMat) * weightsFromNum\n \n #negPairLoss = torch.mul(cosSimMat, 1-target_simMat) \n negPairLoss = (cosSimMat-self.margin) * (1-target_simMat) * weightsFromNum \n negPairLoss = negPairLoss.clamp(min=0)\n \n posPairLoss = torch.sum(torch.sum(torch.sum(posPairLoss))) * weight_pos\n negPairLoss = torch.sum(torch.sum(torch.sum(negPairLoss))) * weight_neg\n \n lossValue = posPairLoss + negPairLoss\n return lossValue \n \n \n def gen_per_pixel_weight_matrix(self, weight):\n # generate the per-pixel weight for training\n weight_NxM = weight.view(weight.size(0), -1) # NxHxW --> NxM\n weight_NxMx1 = weight_NxM.unsqueeze(-1) # NxM --> NxMx1\n weight_NxMx1 = weight_NxMx1.expand(weight_NxM.size(0), weight_NxM.size(1), weight_NxM.size(1)) \n #weight_Nx1xM = weight_NxM.unsqueeze(1) # NxM --> Nx1xM\n #weight_Nx1xM = weight_Nx1xM.expand(weight_Nx1xM.size(0), weight_Nx1xM.size(2), weight_Nx1xM.size(2))\n #weight_simMat = torch.mul(weight_NxMx1, weight_Nx1xM)\n weight_simMat = torch.mul(weight_NxMx1, weight_NxMx1.permute(0,2,1)) \n weight_simMat = weight_simMat.type('torch.FloatTensor')\n return weight_simMat.to(self.device)\n \n \n def indicator_to_similarity_matrix(self, target):\n # per-pixel weight \n WeightMat = target.clone()\n WeightMat[WeightMat!=0] = 5\n WeightMat[WeightMat==0] = 1\n \n # generate the target for training\n target_NxM = target.view(target.size(0), -1) # NxHxW --> NxM\n target_NxMx1 = target_NxM.unsqueeze(-1) # NxM --> NxMx1\n target_NxMx1 = target_NxMx1.expand(target_NxM.size(0), target_NxM.size(1), target_NxM.size(1)) \n target_Nx1xM = target_NxM.unsqueeze(1) # NxM --> Nx1xM\n target_Nx1xM = target_Nx1xM.expand(target_Nx1xM.size(0), target_Nx1xM.size(2), target_Nx1xM.size(2)) \n target_simMat = target_NxMx1.eq(target_Nx1xM)\n target_simMat = target_simMat.type('torch.FloatTensor')\n return target_simMat.to(self.device)\n \n \n \n def rand_sample_pixels(self, inputs):\n inputs_size = inputs.size()\n tensor_way = len(inputs_size) \n inputs = torch.index_select(inputs, tensor_way-2, self.randIdx_H, out=None) \n inputs = torch.index_select(inputs, tensor_way-1, self.randIdx_W, out=None) \n return inputs", "sub_path": "tutorial_pytorch_05_flow/models/customized_loss.py", "file_name": 
"customized_loss.py", "file_ext": "py", "file_size_in_byte": 5614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "265555478", "text": "# nbtoc - ipython extension\n#\n# Print a table of contents for IPython notebook file (.ipynb)\n# Put this file to ~/.ipython/extensions\n#\n# Usage:\n# %load_ext nbtoc\n# %print_toc welcome.ipynb\n#\n# Author: Cho-Yi Chen (ntu.joey@gmail.com)\nimport sys\nimport json\nimport IPython\nfrom IPython.core.magic import magics_class, line_magic, Magics\nfrom IPython.core.display import HTML\n\n@magics_class\nclass nbtoc(Magics):\n @line_magic\n def print_toc(self, line):\n \"\"\"Print a table of contents for an ipynb.\n \n Usage:\n %print_toc [, MAX]\n \n Arguments:\n : The ipynb file to read.\n MAX: How deep the header level to print.\n \"\"\"\n if \",\" in line:\n PATH, MAX = line.split(\",\", 1)\n MAX = int(MAX)\n else:\n PATH = line\n MAX = 2 # default\n\n f = json.load(open(PATH))\n self.out = []\n for cell in f[\"cells\"]:\n if cell['cell_type'] == 'markdown':\n if cell['source'][0].startswith(\"#\"):\n self.out.append(self.get_heading(cell['source'][0], max=MAX))\n #print \"\\n\".join(self.out)\n return self\n\n\n def _repr_html_(self):\n # from markdown\n # return markdown.markdown(\"\\n\".join(self.out))\n # from a list of elements [lv, str, url]\n html = \"\"\n old_lv = 1\n html += \"
    \"\n for h in self.out:\n lv, s, url = h\n if lv > old_lv:\n html += \"
      \"\n if lv < old_lv:\n html += \"
    \"\n html += '
  1. %s
  2. ' % (url, s)\n old_lv = lv\n html += \"
\"\n return html\n\n def get_heading(self, s, min=1, max=2):\n if s.startswith(\"####\"):\n lv = 4\n elif s.startswith(\"###\"):\n lv = 3\n elif s.startswith(\"##\"):\n lv = 2\n elif s.startswith(\"#\"):\n lv = 1\n else:\n lv = 0\n if min <= lv <= max:\n s = s.lstrip(\"#\").strip()\n # to markdown\n #return \"%s* [%s](#%s)\" % (' ' * (lv-1)*2, s, s.replace(' ', '-'))\n # to a list of elements\n return [lv, s, s.replace(' ', '-')]\n\ndef load_ipython_extension(ipython):\n ipython.register_magics(nbtoc)\n\n", "sub_path": "extensions/nbtoc.py", "file_name": "nbtoc.py", "file_ext": "py", "file_size_in_byte": 2342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "IPython.core.magic.Magics", "line_number": 18, "usage_type": "name"}, {"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "IPython.core.magic.line_magic", "line_number": 19, "usage_type": "name"}, {"api_name": "IPython.core.magic.magics_class", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "305282075", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport os\ntry:\n from core.libs.console import AdvancedConsole\nexcept:\n from console import AdvancedConsole\ntry:\n import core.libs.converters as Converters\nexcept:\n import converters as Converters\ntry:\n from core.libs.wpasupplicantconf import WpaSupplicantConf\nexcept:\n from wpasupplicantconf import WpaSupplicantConf\n\nclass MacWirelessNetworks(AdvancedConsole):\n \"\"\"\n Return list of wireless networks\n \"\"\"\n\n CACHE_DURATION = 2.0\n \n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n AdvancedConsole.__init__(self)\n\n #members\n self._binary = u'/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport'\n self._command = u'/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport --scan'\n self.logger = logging.getLogger(self.__class__.__name__)\n self.networks = {}\n self.error = False\n self.timestamp = None\n self.__last_scanned_interface = None\n\n def is_installed(self):\n \"\"\" \n Return True if command binary exists\n\n Return:\n bool: True if binary exists\n \"\"\"\n return os.path.exists(self._binary)\n\n def __refresh(self, interface):\n \"\"\"\n Refresh list of networks\n \"\"\"\n #check if refresh is needed\n if self.timestamp is not None and time.time()-self.timestamp<=self.CACHE_DURATION and self.__last_scanned_interface==interface:\n self.logger.debug('Don\\'t refresh')\n return\n\n #execute command\n self.__last_scanned_interface = interface\n results = self.find(self._command, r'\\s+(.*?)\\s+(?:(?:.{2}:){5}.{2})\\s+(-\\d+)\\s+(?:.*)\\s+(?:Y|N)\\s+.{2}\\s+(.*)', timeout=15.0)\n self.logger.debug(results)\n\n #handle invalid interface for wifi scanning or disabled interface\n if len(results)==0 and self.get_last_return_code()!=0:\n self.networks = {}\n self.error = True\n return\n\n #parse results\n entries = {}\n for group, groups in results:\n #filter None values\n groups = list(filter(None, groups))\n self.logger.debug(groups)\n\n #handle encryption\n encryption = groups[2].lower()\n if encryption.find(u'wpa2')!=-1:\n encryption = WpaSupplicantConf.ENCRYPTION_TYPE_WPA2\n elif encryption.find(u'wpa')!=-1:\n encryption = WpaSupplicantConf.ENCRYPTION_TYPE_WPA\n elif encryption.find(u'wep')!=-1:\n encryption = WpaSupplicantConf.ENCRYPTION_TYPE_WEP\n elif encryption.find(u'none')!=-1:\n encryption = WpaSupplicantConf.ENCRYPTION_TYPE_UNSECURED\n else:\n 
encryption = WpaSupplicantConf.ENCRYPTION_TYPE_UNKNOWN\n\n #handle signal level\n signal_level = Converters.dbm_to_percent(int(groups[1]))\n\n #save entry\n entries[groups[0]] = {\n u'interface': interface,\n u'network': groups[0],\n u'encryption': encryption,\n u'signallevel': signal_level\n }\n self.logger.debug('entries: %s' % entries)\n\n #save networks and error\n self.networks = entries\n self.error = False\n\n def has_error(self):\n \"\"\"\n Return True if error occured\n \n Return:\n bool: True if error, False otherwise\n \"\"\"\n return self.error\n\n def get_networks(self, interface):\n \"\"\"\n Return all wifi networks scanned\n\n Args:\n interface (string): interface name\n\n Returns:\n dict: dictionnary of found wifi networks::\n {\n network: {\n interface (string): interface scanned\n network (string): wifi network name\n encryption (string): encryption type (TODO)\n signallevel (float): wifi signal level\n },\n ...\n }\n \"\"\"\n self.__refresh(interface)\n\n return self.networks\n\nif __name__ == '__main__':\n import pprint\n pp = pprint.PrettyPrinter(indent=2)\n\n logging.basicConfig(level=logging.DEBUG)\n\n m = MacWirelessNetworks()\n nets = m.get_networks('en1')\n pp.pprint(nets)\n\n\n", "sub_path": "core/libs/macwirelessnetworks.py", "file_name": "macwirelessnetworks.py", "file_ext": "py", "file_size_in_byte": 4471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "console.AdvancedConsole", "line_number": 19, "usage_type": "name"}, {"api_name": "console.AdvancedConsole.__init__", "line_number": 30, "usage_type": "call"}, {"api_name": "console.AdvancedConsole", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf.ENCRYPTION_TYPE_WPA2", "line_number": 80, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf", "line_number": 80, "usage_type": "name"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf.ENCRYPTION_TYPE_WPA", "line_number": 82, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf", "line_number": 82, "usage_type": "name"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf.ENCRYPTION_TYPE_WEP", "line_number": 84, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf", "line_number": 84, "usage_type": "name"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf.ENCRYPTION_TYPE_UNSECURED", "line_number": 86, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf", "line_number": 86, "usage_type": "name"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf.ENCRYPTION_TYPE_UNKNOWN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "wpasupplicantconf.WpaSupplicantConf", "line_number": 88, "usage_type": "name"}, {"api_name": "converters.dbm_to_percent", "line_number": 91, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 142, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 142, "usage_type": "attribute"}]} +{"seq_id": "415862115", "text": "'''\nExercise 1b\n'''\n\nimport random\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport simulator_class as sim\n\nrandom.seed(42)\nnp.random.seed(42)\n\nif 
__name__ == '__main__':\n \n tot_loss_pr = [] \n tot_time_sys = []\n tot_th_av_time_sys_list = []\n tot_pB_list = []\n LOADS = np.linspace(1e-5,13,100).tolist()\n BUFFER_SIZES = [3,5,10]\n \n for BUFFER_SIZE in BUFFER_SIZES:\n \n load_list=[]\n loss_pr = []\n time_sys = []\n pB_list = []\n th_av_time_sys_list = []\n \n for LOAD in LOADS:\n \n # DATA OBJECT\n data = sim.Measure(0,0,0,0,0,0,0,0,0,0,[],[])\n \n # SIMULATION PARAMETERS\n SERVICE = 1000.0\n ARRIVAL = SERVICE/LOAD\n FOG_NODES = 1\n SIM_TIME = 300000\n \n # simulator\n s = sim.Simulator(data, LOAD, SERVICE, ARRIVAL, \n BUFFER_SIZE, FOG_NODES, SIM_TIME)\n print_everything = False\n data, time, _, _ = s.simulate(print_everything)\n \n # cumulate statistics\n load_list.append(LOAD)\n loss_pr.append(data.toCloud/data.arr)\n time_sys.append(data.delay/data.dep)\n \n # theoretical value for average time in the system\n th_av_num_us = 0\n for i in range(1,BUFFER_SIZE+FOG_NODES+1):\n pi = ((1 - LOAD) / (1 - LOAD**(BUFFER_SIZE+FOG_NODES+1))) * (LOAD**i)\n th_av_num_us += i * pi\n pB = ((1 - LOAD) / (1 - LOAD**(BUFFER_SIZE+FOG_NODES+1))) * (LOAD**(BUFFER_SIZE+FOG_NODES))\n exp_lambd = (1/ARRIVAL) - ((1/ARRIVAL)*pB)\n th_av_time_sys = th_av_num_us / exp_lambd\n pB_list.append(pB)\n th_av_time_sys_list.append(th_av_time_sys)\n \n tot_loss_pr.append(loss_pr)\n tot_time_sys.append(time_sys)\n tot_th_av_time_sys_list.append(th_av_time_sys_list)\n tot_pB_list.append(pB_list)\n \n \n #%% Loss probability vs Load\n\n colors = [['navy','darkorange','darkgreen'],\n ['tab:blue','tab:orange','tab:green']]\n for i in range(len(tot_loss_pr)):\n plt.plot(load_list, tot_loss_pr[i], '-', linewidth=0.7, c=colors[1][i], label=f'Simluated Buf={BUFFER_SIZES[i]}')\n #plt.plot(load_list, tot_pB_list[i], linewidth=0.5, c=colors[0][i], label=f'Theoretical B={BUFFER_SIZES[i]}')\n plt.grid()\n plt.legend()\n plt.xlabel(\"Load\")\n plt.ylabel(\"Forwarding probability\")\n #plt.xlim([0,20])\n plt.ylim([0,1])\n plt.title('Forwarding probability vs Load')\n plt.show()\n \n # Loss probability vs Load (zoomed)\n for i in range(len(tot_loss_pr)):\n plt.plot(load_list, tot_loss_pr[i], '.-', linewidth=0.5, c=colors[1][i], label=f'Simluated Buf={BUFFER_SIZES[i]}')\n plt.plot(load_list, tot_pB_list[i], linewidth=0.5, c=colors[0][i], label=f'Theoretical Buf={BUFFER_SIZES[i]}')\n plt.grid()\n plt.legend()\n plt.xlabel(\"Load\")\n plt.ylabel(\"Forwarding probability\")\n plt.xlim([0,3])\n plt.ylim([0,1])\n plt.title('Forwarding probability vs Load (zoomed)')\n plt.show()\n\n \n # Avg time spent in system vs Load\n for i in range(len(tot_time_sys)):\n plt.plot(load_list, tot_time_sys[i], '.-', linewidth=0.5, c=colors[1][i], label=f'Simulated Buf={BUFFER_SIZES[i]}')\n plt.plot(load_list, tot_th_av_time_sys_list[i], linewidth=1, c=colors[0][i], label=f'Theoretical Buf={BUFFER_SIZES[i]}')\n plt.grid()\n plt.legend(loc='lower right', ncol = 2)\n plt.xlabel(\"Load\")\n plt.ylabel(\"Avgerage time [ms]\")\n #plt.xlim([0,14])\n plt.ylim([-1000,13000])\n plt.title('Avg time spent in system vs Load')\n plt.show()\n\n", "sub_path": "Lab1/ex1b.py", "file_name": "ex1b.py", "file_ext": "py", "file_size_in_byte": 3762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 19, 
"usage_type": "call"}, {"api_name": "simulator_class.Measure", "line_number": 33, "usage_type": "call"}, {"api_name": "simulator_class.Simulator", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", 
"line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "639039711", "text": "import numpy\nimport sys\nfrom PIL import Image, ImageDraw, ImageChops\nfrom xml.dom import minidom\n\ndoc = minidom.parse(sys.argv[2]) # .svg file parseString also exists\n\n\n# define the coordinate to crop\ndef coordinates_to_crop(coordinate_tuple, xMin, yMin, xMax, yMax):\n x, y = coordinate_tuple\n if(x < xMin):\n xMin = x\n if(x > xMax):\n xMax = x\n if(y < yMin):\n yMin = y\n if(y > yMax):\n yMax = y\n\n return xMin, yMin, xMax, yMax\n\n\ndef trim(im):\n bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))\n diff = ImageChops.difference(im, bg)\n diff = ImageChops.add(diff, diff, 2.0, -100)\n bbox = diff.getbbox()\n if bbox:\n return im.crop(bbox)\n\n# Parse all the paths in a svg\nfor path in doc.getElementsByTagName('path'):\n path_strings = path.getAttribute('d') # The svg path as a string\n img_id = path.getAttribute('id') # Image id, for file name\n # Maybe not the best method for string parsing but will always work\n # for this kind of paths\n path_strings = path_strings.replace(\" Z \", \";\")\n path_strings = path_strings.replace(\" L \", \";\")\n path_strings = path_strings.replace(\" M \", \";\")\n path_strings = path_strings.replace(\"Z \", \"\")\n path_strings = path_strings.replace(\"L \", \"\")\n path_strings = path_strings.replace(\"M \", \"\")\n path_strings = path_strings.replace(\" Z\", \"\")\n path_strings = path_strings.replace(\" L\", \"\")\n path_strings = path_strings.replace(\" M\", \"\")\n\n # Split the path string into tuple string\n coordinate_tuples_string = path_strings.split(';')\n\n # polygon will contain the coordinates\n polygon = []\n xMax = yMax = 0\n xMin = yMin = float('Inf')\n\n for coordinate_string in coordinate_tuples_string:\n coordinate_list = coordinate_string.split(\" \")\n coordinate_tuple = (float(coordinate_list[0]), float(coordinate_list[1]))\n xMin, yMin, xMax, yMax = coordinates_to_crop(coordinate_tuple, xMin, yMin, xMax, yMax)\n polygon.append(coordinate_tuple)\n\n # read image as RGB and add alpha (transparency)\n im = Image.open(sys.argv[1]).convert(\"RGBA\") # .jpg file\n\n # convert to numpy (for convenience)\n imArray = numpy.asarray(im)\n\n # create mask\n maskIm = Image.new('L', (imArray.shape[1], imArray.shape[0]), 0)\n ImageDraw.Draw(maskIm).polygon(polygon, outline=1, fill=1)\n mask = numpy.array(maskIm)\n\n # assemble new image (uint8: 0-255)\n newImArray = numpy.empty(imArray.shape, dtype='uint8')\n\n # colors (three first columns, RGB)\n newImArray[:, :, :3] = imArray[:, :, :3]\n\n # transparency (4th column)\n newImArray[:, :, 3] = mask*255\n\n # back to Image from numpy\n newIm = 
Image.fromarray(newImArray, \"RGBA\")\n\n # make the image binary\n binaryIm = newIm.convert('L')\n binaryIm = binaryIm.point(lambda x: 0 if x < 160 else 255, '1')\n\n # paste to new image\n jpgSize = (int(round(newIm.size[0])), int(round(newIm.size[1])))\n background = Image.new('L', jpgSize, 255)\n background.paste(binaryIm, mask=newIm.split()[3]) # 3 is the alpha channel\n background = background.convert('1')\n\n # crop the imagen at size of the word\n cropIntegers = [int(round(x)) for x in (xMin, yMin, xMax, yMax)]\n background = background.crop(cropIntegers)\n\n # trim\n background = trim(background)\n\n # resize\n background = background.resize((100, 100), Image.ANTIALIAS)\n\n # jpg version\n # file_name = \"words/\" + img_id + \".jpg\"\n # background.save(file_name)\n\n # png version\n file_name = \"words/\" + img_id + \".png\"\n background.save(file_name)\n\n print(\"Created : \"+file_name)\n\ndoc.unlink()\n", "sub_path": "KST/img_svg_splitting.py", "file_name": "img_svg_splitting.py", "file_ext": "py", "file_size_in_byte": 3638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "xml.dom.minidom.parse", "line_number": 6, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 6, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PIL.Image.new", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.ImageChops.difference", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.ImageChops.add", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 91, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 103, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "327493238", "text": "import os\nimport sys\nassert 'openvino' in os.environ['PYTHONPATH']\nfrom imutils.video.pivideostream import PiVideoStream\nfrom imutils.video import FPS\nimport imutils\nimport time\nimport cv2\nfrom PIL import Image\nimport numpy as np\nfrom tensorflow.keras.applications.inception_v3 import decode_predictions\n\ntry:\n from openvino import inference_engine as ie\n from openvino.inference_engine import IENetwork, IEPlugin\nexcept Exception as e:\n exception_type = type(e).__name__\n print(\"The following error happened while 
importing Python API module:\\n[ {} ] {}\".format(exception_type, e))\n sys.exit(1)\n\ndef pre_process_image(image, img_height=224):\n # Model input format\n n, c, h, w = [1, 3, img_height, img_height]\n # image = Image.open(imagePath)\n processedImg = cv2.resize(image,(h, w), interpolation= cv2.INTER_LINEAR) #bilinear interpolation\n\n # Normalize to keep data between 0 - 1\n processedImg = (np.array(processedImg) - 0) / 255.0\n\n # Change data layout from HWC to CHW because GPU is optimised for this\n processedImg = processedImg.transpose((2, 0, 1))\n processedImg = processedImg.reshape((n, c, h, w))\n\n return processedImg\n\n# Plugin initialization for specified device and load extensions library if specified.\nplugin_dir = None\nmodel_xml = './model/frozen_model.xml'\nmodel_bin = './model/frozen_model.bin'\n# Devices: GPU (intel), CPU, MYRIAD\nplugin = IEPlugin(\"MYRIAD\")\n# Read IR\nnet = IENetwork.from_ir(model=model_xml, weights=model_bin)\nassert len(net.inputs.keys()) == 1\nassert len(net.outputs) == 1\ninput_blob = next(iter(net.inputs))\nout_blob = next(iter(net.outputs))\n# Load network to the plugin\nexec_net = plugin.load(network=net)\ndel net\n\n# Run inference\n# fileName = './data/waterbottle.jpg'\n# print(\"[INFO] sampling THREADED frames from `picamera` module...\")\nvs = PiVideoStream().start()\ntime.sleep(2.0)\nfps = FPS().start()\nwhile 1:\n frame = vs.read()\n cv2.imshow(\"Frame\", frame)\n #gives me hwc\n processedImg = pre_process_image(frame)\n start = time.time()\n res = exec_net.infer(inputs={input_blob: processedImg})\n # Access the results and get the index of the highest confidence score\n output_node_name = list(res.keys())[0]\n res = res[output_node_name]\n # Predicted class index.\n idx = np.argsort(res[0])[-1]\n # decode the predictions\n print('Predicted:', decode_predictions(res, top=3)[0])\n end = time.time()\n print(\"========================= Time Taken: {}ms ===========================\".format(end-start))\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n\t# update the FPS counter\n fps.update()\n \n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n \n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()\n\n\n\n", "sub_path": "raspi_test.py", "file_name": "raspi_test.py", "file_ext": "py", "file_size_in_byte": 2868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "openvino.inference_engine.IEPlugin", "line_number": 41, "usage_type": "call"}, {"api_name": "openvino.inference_engine.IENetwork.from_ir", "line_number": 43, "usage_type": "call"}, {"api_name": "openvino.inference_engine.IENetwork", "line_number": 43, "usage_type": "name"}, {"api_name": "imutils.video.pivideostream.PiVideoStream", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "imutils.video.FPS", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 60, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.inception_v3.decode_predictions", "line_number": 71, "usage_type": "call"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "106115924", "text": "import os\nfrom flask import Flask, render_template, request\nimport giphypop\napp = Flask(__name__)\n\n\n#Defines function to pull the GIFs\ndef get_gif(search_term):\n g = giphypop.Giphy()\n gifs = g.search(search_term) # returns a list of objects \n return gifs\n # for gif in gifs:\n # print(gif.media_url)\n # print(gif.url)\n\n\n#creates the index page\n@app.route('/')\ndef index():\n name = request.values.get('name', \"Friends\")\n greeting = \"Hello, {}!\".format(name) \n return render_template('index.html', greeting=greeting)\n\n\n#creates the results page\n\n@app.route('/results')\ndef results():\n gif = request.values.get('gif')\n results = get_gif(gif)\n return render_template('results.html', results=results)\n \n\n#creates the about page\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n\nport = int(os.environ.get(\"PORT\", 5000))\napp.run(host=\"0.0.0.0\", port=port)\n", "sub_path": "giphy_app.py", "file_name": "giphy_app.py", "file_ext": "py", "file_size_in_byte": 927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "giphypop.Giphy", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 29, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 42, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "469672529", "text": "import h5py\nimport numpy as np\n\nwith h5py.File('images_training.h5','r') as H:\n data = np.copy(H['data'])\n\nwith h5py.File('labels_training.h5','r') as H:\n label = np.copy(H['label'])\n\n#(reshape the data to 30000, 784)\ntwoD_data = data.reshape(30000, -1)\n\n# run pca and keep 90% eigenValue\nlowDinputData = pca(twoD_data, 0.9)\n\n\n\n\n #get mean by col\ndef zeroMean(inputData):\n meanVal=np.mean(inputData,axis=0)\n newData=inputData-meanVal\n return newData,meanVal\n\ndef pca(inputData,percentage):\n newData,meanVal=zeroMean(inputData)\n\n covMat=np.cov(newData,rowvar=0) #get cov\n eigVals,eigVects=np.linalg.eig(np.mat(covMat)) #get eigenValue, eigenVector\n k=getPercentageOfEig(eigVals,percentage) #difine how many % of eigVects to keep\n\n resortEigVal=np.argsort(eigVals) #resort eigenValue\n k_resortEigVal=resortEigVal[-1:-(k+1):-1]\n k_eigVect=eigVects[:,k_resortEigVal] #get eigenVector by resort eigenValue\n lowDData=newData*k_eigVect # get the lower dimension data\n\n return lowDData\n\n\ndef getPercentageOfEig(eigVals,percentage): #return the number of eigenValue to keep by percentage\n sortArray=np.sort(eigVals)\n sortArray=sortArray[-1::-1]\n arraySum=sum(sortArray)\n tmpSum=0\n num=0\n\n for i in sortArray:\n tmpSum+=i\n num+=1\n if tmpSum>=arraySum*percentage:\n return num\n", "sub_path": "pca.py", "file_name": "pca.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "h5py.File", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 5, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.mat", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "567269395", "text": "import matplotlib.pyplot as plt\nimport matplotlib.dates as dt\nfrom matplotlib.dates import DateFormatter\nimport numpy as np\nimport json\nfrom datetime import datetime, timedelta\nimport matplotlib.dates as mdates\nimport emoji\n\n\n#### Facebook Chat Statistics ####\n\n# Load JSON data. Edit the path below to the path of your parsed conversation.\ndata = json.load(open('/Path/To/Your/Conversation.json'))\n\n# Names. 
Change these to your liking.\nyou = 'You'\npartner = 'Partner'\n\n# Convert unicode characters into what Python expects them to look like\nfor message in data['messages']:\n\tmessage['sender_name'] = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')\n\tif 'content' in message:\n\t\tmessage['content'] = message['content'].encode('raw_unicode_escape').decode('utf-8')\n\n# If the archive isn't sorted by time, you may get errors. Uncomment this line to fix them.\n# data['messages'] = sorted(data['messages'], key=lambda message: message['timestamp'])\n\n# Start time\nstart_time = datetime.fromtimestamp(data['messages'][-1]['timestamp'])\nprint(\"Start time: \" + str(start_time))\n\n# End time\nend_time = datetime.fromtimestamp(data['messages'][0]['timestamp'])\nprint(\"End time: \" + str(end_time))\n\n#### Totals ####\n\n# Number of days\nnbr_days = (end_time - start_time).days\nprint(\"Number of days: \" + str(nbr_days))\n\n# Number of messages\nnbr_msg = len(data['messages'])\nprint(\"Number of messages: \" + str(nbr_msg))\n\n# Total number of words\nnbr_words = 0\nfor message in data['messages']:\n\tif 'content' in message:\n\t\tnbr_words += len(message['content'].split())\nprint(\"Number of words: \" + str(nbr_words))\n\n#### Averages ####\n\n# Length of a message\navg_len_msg = round(nbr_words / nbr_msg, 1)\nprint(\"Average length of messages: \" + str(avg_len_msg) + \" words\")\n\n# Messages per day\navg_msg_per_day = round(nbr_msg / nbr_days, 1)\nprint(\"Average messages per day: \" + str(avg_msg_per_day))\n\n# Plot of who texts the most\nnbr_you = 0\nnbr_partner = 0\nfor message in data['messages']:\n\tif message['sender_name'] == you:\n\t\tnbr_you += 1\n\telse:\n\t\tnbr_partner += 1\nprocentage_you = 100 * round(nbr_you / nbr_msg, 2)\nprocentage_partner = 100 * round(nbr_partner / nbr_msg, 2)\nfracs = [procentage_you, procentage_partner];\nlabels = [you, partner]\ncolors = ['xkcd:crimson', '#F08080']\npie = plt.pie(fracs, colors=colors, labels=labels, startangle=90, autopct='%1.1f%%')\nplt.axis('equal')\nplt.title(\"Who texts the most?\")\nplt.show()\nprint(\"Number of times \" + you + \": \" + str(nbr_you) + ' (' + str(procentage_you) + ')')\nprint(\"Number of times \" + partner + \": \" + str(nbr_partner) + ' (' + str(procentage_partner) + ')')\n\n# Fetch timeline data\ntimeline = [None] * (nbr_days + 2)\nhour = list(range(24))\nweekday_arr = [0, 1, 2, 3, 4, 5, 6]\nnbr_times_hour = [0] * 24\nnbr_times_weekday = [0] * 7\nnbr_times_day = [0] * (nbr_days + 2)\ncurrent_day = end_time.date()\nindex = len(timeline) - 1\ntimeline[index] = current_day\nnbr_times_day[index] = 1\nfor message in data['messages']:\n\tcurrent = datetime.fromtimestamp(message['timestamp'])\n\th = current.hour + current.minute / 60. 
+ current.second / 3600\n\th = int(round(h))\n\tif h == 24:\n\t\th = 0\n\tnbr_times_hour[h] = nbr_times_hour[h] + 1\n\twd = current.weekday()\n\tnbr_times_weekday[wd] = nbr_times_weekday[wd] + 1\n\tcurrent = current.date()\n\tif current == current_day:\n\t\tnbr_times_day[index] = nbr_times_day[index] + 1\n\telif current < current_day:\n\t\tdiff = (current_day - current).days\n\t\tindex = index - diff\n\t\tcurrent_day = current\n\t\ttimeline[index] = current_day\n\t\tnbr_times_day[index] = 1\ndates = [None] * len(timeline)\nfor i in range(0, len(timeline)):\n\tif timeline[i] == None:\n\t\ttimeline[i] = timeline[i - 1] + timedelta(days=1)\n\tdates[i] = timeline[i].strftime(\"%Y-%m-%d\")\n\n# Plot timeline\nfmt = mdates.DateFormatter('%Y-%m-%d')\nloc = mdates.MonthLocator(interval=6)\nax = plt.axes()\nax.xaxis.set_major_formatter(fmt)\nax.xaxis.set_major_locator(loc)\nplt.bar(timeline, nbr_times_day, align=\"center\", width=8, color='xkcd:crimson')\nplt.title(\"Timeline\")\nax = plt.axes()\nax.yaxis.grid(linestyle='--')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_linewidth(0.5)\nax.spines['left'].set_linewidth(0.5)\nfig = plt.figure(1)\nfig.autofmt_xdate()\nplt.tight_layout()\nplt.show()\n\n# Plot by hour\nplt.bar(hour, nbr_times_hour, align=\"center\", width=0.8, color='xkcd:crimson')\nplt.title(\"Activity by Day\")\nax = plt.axes()\nax.yaxis.grid(linestyle='--')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_linewidth(0.5)\nax.spines['left'].set_linewidth(0.5)\nfig = plt.figure(1)\nplt.tight_layout()\nplt.show()\n\n# Plot by weekday\nweekday_labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\nplt.bar(weekday_arr, nbr_times_weekday, align=\"center\", width=0.8, color='xkcd:crimson')\nplt.xticks(weekday_arr, weekday_labels)\nplt.title(\"Activity by Week\")\nax = plt.axes()\nax.yaxis.grid(linestyle='--')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_linewidth(0.5)\nax.spines['left'].set_linewidth(0.5)\nfig = plt.figure(1)\nplt.tight_layout()\nplt.show()\n\n# Most messages in one day\nmost_msg = max(nbr_times_day)\nprint(\"Most messages in one day: \" + str(most_msg))\n\n# Number of red hearts sent by each person and the most used emojies\nnbr_hearts_you = 0\nnbr_hearts_partner = 0\nemojis_list = {}\niter = iter(emoji.UNICODE_EMOJI.values())\nfor k in iter:\n\temojis_list[k] = 0\nfor message in data['messages']:\n\tif 'content' in message:\n\t\tmsg = message['content']\n\t\tsender = message['sender_name']\n\t\tfor c in msg:\n\t\t\temoji_str = emoji.demojize(c)\n\t\t\tif emoji_str == ':red_heart:':\n\t\t\t\tif sender == you:\n\t\t\t\t\tnbr_hearts_you = nbr_hearts_you + 1\n\t\t\t\telse:\n\t\t\t\t\tnbr_hearts_partner = nbr_hearts_partner + 1\n\t\t\tif emoji_str in emojis_list:\n\t\t\t\temojis_list[emoji_str] = emojis_list[emoji_str] + 1\nprint(\"Number of \" + emoji.emojize(':red_heart:') + \" \" + you + \": \" + str(nbr_hearts_you))\nprint(\"Number of \" + emoji.emojize(':red_heart:') + \" \" + partner + \": \" + str(nbr_hearts_partner))\ntop_emojies = []\nemoji_count = []\nfor emoji_key, count in sorted(emojis_list.items(), key=lambda kv: (-kv[1], kv[0]))[:10]:\n\ttop_emojies.append(emoji.emojize(emoji_key))\n\temoji_count.append(count)\n\n# Plot top 10 emojies\nx = np.arange(len(top_emojies))\nplt.bar(x, emoji_count, align=\"center\", width=0.8, color='xkcd:crimson')\nplt.xticks(x, 
top_emojies)\nplt.title(\"Top 10 Emojis\")\nax = plt.axes()\nax.yaxis.grid(linestyle='--')\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_linewidth(0.5)\nax.spines['left'].set_linewidth(0.5)\nfig = plt.figure(1)\nplt.tight_layout()\nplt.show()", "sub_path": "facebook_chat_statistics.py", "file_name": "facebook_chat_statistics.py", "file_ext": "py", "file_size_in_byte": 6687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.dates.MonthLocator", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "emoji.UNICODE_EMOJI.values", "line_number": 174, "usage_type": "call"}, {"api_name": "emoji.UNICODE_EMOJI", "line_number": 174, "usage_type": "attribute"}, {"api_name": "emoji.demojize", "line_number": 182, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 190, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 191, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}]} +{"seq_id": "342441489", "text": "import os\n\nfrom setuptools import setup, 
find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\n\nrequires = [\n 'flask',\n 'wtforms',\n 'sqlalchemy',\n 'nose',\n 'coverage',\n 'formencode',\n ]\n\nsetup(name='rocpy',\n version='0.0',\n description='rocpy-website',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Flask\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='RocPy',\n author_email='',\n url='http://github.com/rocpy/rocpy-website',\n keywords='web wsgi flask rocpy',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n tests_require=[\n ],\n test_suite='rocpy',\n install_requires=requires,\n )\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "617554849", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSave multi-atlas label fusion results for the test data from propagated atlases.\r\n\r\n@author: Xinzhe Luo\r\n\r\n@version: 0.1\r\n\"\"\"\r\n\r\nfrom __future__ import print_function, division, absolute_import, unicode_literals\r\n# from core import model_ddf_mvmm_label_base as model\r\nfrom core import image_dataset as image_utils\r\nfrom core import utils\r\n# import nibabel as nib\r\nimport numpy as np\r\n# import tensorflow as tf\r\nimport os\r\nimport logging\r\nimport pandas as pd\r\nimport argparse\r\nfrom datetime import datetime\r\n\r\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\r\n# config = tf.ConfigProto(device_count={'GPU': 0}) # cpu only\r\nt = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\r\n\r\nparser = argparse.ArgumentParser(description=\"Start label fusion on propagated atlases!\")\r\nparser.add_argument('--model_path', type=str, required=True, \r\n help='model path where to load the pairwise label propagation results')\r\nparser.add_argument('--method', default='multiplication', type=str,\r\n choices=['multiply_mask', 'multiply_ncc', 'majority_voting'],\r\n help='method to perform multi-atlas label fusion')\r\nparser.add_argument('--atlas_search_path',\r\n default='./2019-11-14_12-15-22_ddf_mvmm_label_mr_mr_2mm/test_predictions_commonspace2_2mm/*.nii.gz',\r\n type=str, help='search pattern to find all atlas data')\r\nparser.add_argument('--target_search_path',\r\n default='../../../dataset/test_mr_40_commonspace2/*.nii.gz', type=str,\r\n help='search pattern to find all target data')\r\nparser.add_argument('--target_modality', default='mr', choices=[\"mr\", \"ct\"],\r\n help=\"the modality of target images, either 'mr' or 'ct'\")\r\nparser.add_argument('--atlas_modality', default='mr', choices=['mr', 'ct'],\r\n help=\"the modality of atlas image, either 'mr' or 'ct'\")\r\nparser.add_argument('--image_suffix', default='image.nii.gz', type=str,\r\n help='suffix pattern for image 
loading')\r\nparser.add_argument('--label_suffix', default='label.nii.gz', type=str,\r\n help='suffix pattern for label loading')\r\nparser.add_argument('--weight_suffix', default=None, type=str,\r\n help='suffix pattern for weight loading')\r\nparser.add_argument('--channels', default=1, type=int, help='number of image channels')\r\nparser.add_argument('--n_class', default=8, type=int, help='number of label classes, including the background')\r\nparser.add_argument('--label_intensity', default=(0, 205, 420, 500, 550, 600, 820, 850), type=int, nargs='+',\r\n help='list of intensities of the training ground truths')\r\nparser.add_argument('--num_targets', default=40, type=int, help='number of targets to be fused')\r\nparser.add_argument('--num_atlases', default=20, type=int, help='number of atlases to be fused')\r\nparser.add_argument('--crop_patch', default=True, action='store_false',\r\n help='whether patches of a certain size need to be cropped for training')\r\nparser.add_argument('--patch_size', default=(80, 80, 80), type=int, nargs=3,\r\n help='size of the training patches')\r\nparser.add_argument('--original_size', default=(112, 96, 112), type=int, nargs=3,\r\n help='original size of the saved image')\r\nparser.add_argument('--crop_roi', default=False, action='store_true',\r\n help='whether to crop ROI containing the whole foreground on training data')\r\nparser.add_argument('--num_blocks', default=(1, 1, 1), type=int, nargs=3,\r\n help='the number of blocks of input data along each axis')\r\nparser.add_argument('--scale', default=0, type=int, help='scale of the processed data')\r\nparser.add_argument('--sigma', default=1., type=float, help='scale of the Gaussian filter to produce prob labels')\r\nparser.add_argument('--stage', default='multi', choices=['single', 'multi'],\r\n help=\"the registration stage, either 'single' or 'multi', default 'multi'\")\r\nargs = parser.parse_args()\r\n\r\n\r\nif __name__ == '__main__':\r\n # change chief working directory\r\n os.chdir('../')\r\n logging.info(\"Chief working directory changed to: %s\" % os.path.abspath(os.getcwd()))\r\n\r\n # tissue types\r\n tissue = {205: 'myocardium', 420: 'LA', 500: 'LV', 550: 'RA',\r\n 600: 'RV', 820: 'ascending_aorta', 850: 'pulmonary_artery'}\r\n\r\n # where to save the fused labels\r\n save_path = os.path.join(args.model_path,\r\n 'label_fusions_commonspace2_atlases_%s_%s' % (args.num_atlases, args.method))\r\n # save_path = os.path.join(args.model_path,\r\n # 'label_fusions_commonspace2_%s_atlases_%s_%s' % (tissue[args.label_intensity[1]],\r\n # args.num_atlases, args.method))\r\n\r\n if not os.path.exists(save_path):\r\n logging.info(\"Allocating '%s'\" % save_path)\r\n os.makedirs(save_path)\r\n\r\n # where to save the metrics\r\n metrics_path = os.path.join(args.model_path,\r\n 'metrics_fusions_commonspace2_%s.xlsx' % args.method)\r\n\r\n # create data provider\r\n data_provider = image_utils.ImageDataProvider(target_modality=args.target_modality,\r\n atlas_modality=args.atlas_modality,\r\n target_search_path=args.target_search_path,\r\n atlas_search_path=args.atlas_search_path,\r\n image_suffix=args.image_suffix,\r\n label_suffix=args.label_suffix,\r\n weight_suffix=args.weight_suffix,\r\n n_atlas=args.num_atlases,\r\n crop_patch=args.crop_patch,\r\n patch_size=args.patch_size,\r\n crop_roi=args.crop_roi,\r\n channels=args.channels,\r\n n_class=args.n_class,\r\n label_intensity=args.label_intensity,\r\n num_blocks=args.num_blocks,\r\n scale=args.scale,\r\n stage=args.stage)\r\n\r\n logging.info(\"Number 
of target-atlas pairs: %s\" % len(data_provider))\r\n target_atlas_image_names = data_provider.target_atlas_image_names\r\n\r\n # set indices and columns for metrics saving into excel files\r\n frame_indices = utils.remove_duplicates([os.path.basename(pair_names[0]).replace(args.image_suffix, '')\r\n for pair_names in target_atlas_image_names])\r\n frame_columns = utils.remove_duplicates(['&'.join([os.path.basename(atlas_name)[-39:].replace(args.image_suffix, '')\r\n for atlas_name in pair_names[1]])\r\n for pair_names in target_atlas_image_names])\r\n # print(frame_columns)\r\n\r\n # set metrics to save\r\n metrics_to_save = {'Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'Jaccard': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'Myocardial Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'LA Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'LV Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'RA Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'RV Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'AO Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'PA Dice': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'Average Surface Distance': np.empty([len(frame_indices), len(frame_columns)]),\r\n 'Hausdorff Distance': np.empty([len(frame_indices), len(frame_columns)])}\r\n\r\n logging.info(\"Start label fusion using method: %s\" % args.method)\r\n # initialize fusion model\r\n fusion_model = utils.MvMMExpectationMaximization(n_class=args.n_class, spacing_mm=(2, 2, 2), sigma=args.sigma)\r\n\r\n # label fusion\r\n for idx in range(min(len(data_provider), args.num_targets)):\r\n # get target-atlases pair names\r\n target_name = os.path.basename(target_atlas_image_names[idx][0]).replace(args.image_suffix, '')\r\n atlases_name = '&'.join([os.path.basename(atlas_name)[-39:].replace(args.image_suffix, '')\r\n for atlas_name in target_atlas_image_names[idx][1]])\r\n\r\n assert target_name == frame_indices[idx // len(frame_columns)], \"Target name: %s and frame index %s \" \\\r\n \"should be equal!\" % (target_name,\r\n frame_indices[idx // len(frame_columns)])\r\n assert atlases_name == frame_columns[idx % len(frame_columns)], \"Atlases name: %s and frame column %s \" \\\r\n \"should be equal!\" % (atlases_name,\r\n frame_columns[idx % len(frame_columns)])\r\n\r\n logging.info(\"[Index]: %s; [Target]: %s; [Propagated atlases]: %s\" % (idx, target_name, atlases_name))\r\n\r\n # load data\r\n data = data_provider[idx]\r\n\r\n # fuse labels\r\n fused_label, metrics = fusion_model.get_simple_fusion_result(warped_atlases_label=data['atlases_label'],\r\n target_labels=data['target_label'],\r\n method=args.method,\r\n warped_atlases_weight=data['atlases_weight'])\r\n # save metrics\r\n for k, v in metrics_to_save.items():\r\n v[idx // len(frame_columns), idx % len(frame_columns)] = metrics[k]\r\n\r\n # save the fused label\r\n utils.save_prediction_nii(fused_label.squeeze(0), save_path, data_provider, data_type='label',\r\n save_name=target_name + 'fusion.nii.gz',\r\n affine=data['target_affine'], header=data['target_header'],\r\n stage=args.stage, original_size=args.original_size)\r\n\r\n # convert metrics into DataFrames\r\n metrics_DataFrame = {}\r\n for k, v in metrics_to_save.items():\r\n metrics_DataFrame[k] = pd.DataFrame(v, index=frame_indices, columns=frame_columns, dtype=np.float32)\r\n\r\n # save metrics into excel files\r\n with pd.ExcelWriter(metrics_path) as writer:\r\n for k, v in 
metrics_DataFrame.items():\r\n v.to_excel(writer, sheet_name=k)\r\n", "sub_path": "src_3d/save_label_fusion.py", "file_name": "save_label_fusion.py", "file_ext": "py", "file_size_in_byte": 11372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 89, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "core.image_dataset.ImageDataProvider", "line_number": 97, "usage_type": "call"}, {"api_name": "core.image_dataset", "line_number": 97, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 115, "usage_type": "call"}, {"api_name": "core.utils.remove_duplicates", "line_number": 119, "usage_type": "call"}, {"api_name": "core.utils", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "core.utils.remove_duplicates", "line_number": 121, "usage_type": "call"}, {"api_name": "core.utils", "line_number": 121, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 139, "usage_type": "call"}, {"api_name": "core.utils.MvMMExpectationMaximization", "line_number": 141, "usage_type": "call"}, {"api_name": "core.utils", "line_number": 141, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 146, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 157, "usage_type": "call"}, {"api_name": "core.utils.save_prediction_nii", "line_number": 172, "usage_type": "call"}, {"api_name": "core.utils", "line_number": 172, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pandas.ExcelWriter", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "215022281", "text": "\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @Filename : conver_dataset_meden_@\n# @Date : 12/30/2018 15:25:39\n# @Poject : DGMP\n# @Author : FEI, hfut_jf@aliyun.com\n# @Desc : use threshold to determine active/inactive\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nsys.path.append(os.path.abspath('/network/rit/lab/ceashpc/fjie/projects/DGMP'))\n\nimport time\nimport pickle\nimport itertools\nimport logging\n\nlogger = logging.getLogger('fei')\nformatter = logging.Formatter('')\nconsoled_handler = logging.StreamHandler()\nconsoled_handler.setFormatter(formatter)\nlogger.addHandler(consoled_handler)\nlogger.setLevel(logging.DEBUG)\nlogger.propagate = False\n\nimport numpy as np\n\nNUM_TRAIN_INSTANCES = 70\nNUM_TEST_INSTANCES = 70\n\ndef convert_meden_binarize():\n\n num_nodes = 100\n\n # overlap_list = [0.4, 0.8]\n overlap_list = [0.4, 0.8]\n mu_1_list = [3]\n graph_type = 'Grid'\n run = 4\n subsize_list = [(10, 20)]\n pattern = 'm'\n num_windows_list = [11]\n num_signal_windows_list = [5]\n data_type = 'train'\n # data_type = 'test'\n\n input_path = '/network/rit/lab/ceashpc/share_data/{}/run{}/dgmp'.format(graph_type, run)\n\n for num_windows, num_signal_windows, subsize_range, mu_1, overlap in itertools.product(num_windows_list, num_signal_windows_list, subsize_list, mu_1_list, overlap_list):\n\n true_start = int(num_windows / 2.0) - int(num_signal_windows / 2.0)\n true_end = int(num_windows / 2.0) + int(num_signal_windows / 2.0)\n\n subsize_min, subsize_max = subsize_range\n fn = 'nodes_{}_windows_{}_mu_{}_subsize_{}_{}_range_{}_{}_overlap_{}_{}_{}.pkl'.format(num_nodes, num_windows, mu_1, subsize_min, subsize_max, true_start, true_end, overlap, pattern, data_type)\n logger.debug('start to process file={}'.format(fn))\n\n # threshold_range = range(1, mu_1)\n threshold_range = [1, 2, 3, 4, 5]\n\n with open(os.path.join(input_path, fn), 'rb') as rfile:\n dataset = pickle.load(rfile)\n\n for id in dataset.keys():\n data = dataset[id]\n graph = data['graph']\n\n for threshold in threshold_range:\n output_path = '/network/rit/lab/ceashpc/share_data/{}/run{}/meden/subsize_{}_{}/threshold_{}'.format(graph_type, run, subsize_min, subsize_max, threshold)\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n output_fn = 'nodes_{}_windows_{}_mu_{}_subsize_{}_{}_range_{}_{}_overlap_{}_{}_{}_{}_{}.txt'.format(num_nodes, num_windows, mu_1, subsize_min, subsize_max, true_start, true_end, overlap, pattern, data_type, id, threshold)\n\n with open(os.path.join(output_path, output_fn), 'w') as wfile:\n for t in range(num_windows):\n features = data['features'][t]\n for (u, v) in graph.edges():\n avg_edge_weight = (features[u]+features[v])/2.\n if avg_edge_weight > threshold:\n converted_edge_weight = 1\n else:\n converted_edge_weight 
= -1\n\n line = '{},{},{},{}\\n'.format(u, v, t, converted_edge_weight)\n wfile.write(line)\n\n\nif __name__ == '__main__':\n convert_meden_binarize()", "sub_path": "share/convert_dataset_meden_2.py", "file_name": "convert_dataset_meden_2.py", "file_ext": "py", "file_size_in_byte": 3372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 28, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "361658436", "text": "import sys\nfrom flask import Flask, jsonify\nimport json, requests\nfrom flask_cors import CORS, cross_origin\nfrom flask import json\nfrom flask.globals import request\n# import os\n# import email_notification as en\nfrom datetime import datetime\nimport message_processor\nimport re\nimport traceback\n\n# --------------------------------------------------------------------------------------------\n\napp = Flask(__name__)\nCORS(app)\n\n#Method to stop the Flask_API before changes are done in config file, calling this in start_bot.BAT file\ndef shutdown_server():\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n# # ----------------------------FAQ Classifier Called--------------------------------------------------------------------------\n\n@app.route('/tamak-bot', methods=['POST'])\n@cross_origin()\ndef GetFAQ():\n if request.method != 'POST':\n return json.dumps({\"Status\": \"ERROR\", \"DATA\": None, \"Reason\": \"Only accept POST request\"})\n if not request.headers['Content-Type'] == 'application/json':\n return json.dumps({\"Status\": \"ERROR\", \"DATA\": None, \"Reason\": \"Only accept Content-Type:application/json\"})\n if not request.is_json:\n return json.dumps({\"Status\": \"ERROR\", \"DATA\": None,\n \"Reason\": 'Expecting json data in the form {\"data\":\"VALUE\"}'})\n data = request.json\n print(data)\n if 'message' not in data:\n return json.dumps({\"Status\": \"ERROR\", \"DATA\": None, \"Reason\": 'Expecting key as data'})\n try:\n statement = data['message']\n print(statement)\n statement=statement.replace(',',\" \")\n statement = statement.replace('.', \" \")\n print(\"data receiving from gui bot\",data)\n except Exception as e:\n print(\"there is some issue\")\n return json.dumps({\"DATA\": None,\n })\n try:\n\n 
data=message_processor.inp(re.sub(\" +\",\" \",statement.strip()), data['sender'])#, data['sess_id']#,data[\"email_id\"],data[\"username\"],data[\"Session_start_time\"],data['Hit_count'])#data['Hit_count']\n        print(\"------------------------------------------------------------------------------\")\n    except Exception as e:\n        #print(os.getcwd())\n        k = traceback.format_tb(e.__traceback__)\n        with open(\"Input_exceptions.txt\", \"a\", encoding='utf-8') as myfile: # ,encoding='utf-8'\n            myfile.write(\n                \"[\" + str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + \"]\" +'('+str(data[\"sender\"])+')' + \"\\n\" +\"User_Input: \" + str(statement) + '\\n'+'Error_Details: ' + str(e) +'\\n'+\"Traceback_details: \"+str(k)+\"\\n\\n\")\n        # print(en.send_email(e,statement,data[\"username\"],data[\"email_id\"]))\n        print(e)\n        return json.dumps({\"Status\": \"ERROR\", \"DATA\": None, \"Reason\": \"Internal server error\"})\n    # print(session_id,\"sessionid\")\n    # print(type(session_id), \"sessionid\")\n    # print(data,\"data\")\n    return json.dumps([{\"Status\": \"SUCCESS\", \"text\": data}])#, \"recipient_id\":session_id }])\n\n@app.route('/shutdown', methods=['GET'])\ndef shutdown():\n    shutdown_server()\n    return 'Restarting the UMKC ROO BOT...'\n\n# ---------------------------------------------------------------------------------------------------\n\ndef startAPIs():\n    try:\n        app.run(\"192.168.1.240\", port=(5004), debug=False, threaded=False)\n        app.run()\n    except Exception as e:\n        raise RuntimeError(\"APIs not started Exception (startAPIs ) at : \" + str(\"192.168.1.240\") + \":\" + str(5004) + \" due to :\" + str(\n            e))\nif __name__ == '__main__':\n    startAPIs()\n", "sub_path": "Source_code/bot_api.py", "file_name": "bot_api.py", "file_ext": "py", "file_size_in_byte": 3626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.globals.request.environ.get", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.globals.request.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.globals.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.globals.request.method", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.globals.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.globals.request.headers", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.globals.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.globals.request.is_json", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.globals.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.globals.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.globals.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 41, "usage_type": "name"}, 
{"api_name": "flask.json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 50, "usage_type": "name"}, {"api_name": "message_processor.inp", "line_number": 54, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 54, "usage_type": "call"}, {"api_name": "traceback.format_tb", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 68, "usage_type": "name"}, {"api_name": "flask_cors.cross_origin", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "149546629", "text": "# Tutorial: https://www.w3schools.com/python/default.asp\r\n# Read CSV file: https://cmdlinetips.com/2011/08/three-ways-to-read-a-text-file-line-by-line-in-python/\r\n# Make DB connection: http://www.postgresqltutorial.com/postgresql-python/connect/\r\n# Execute insert statement: http://initd.org/psycopg/docs/usage.html\r\n\r\n# Opzet van script\r\nimport os\r\nimport psycopg2\r\n\r\nCSV_FOLDER = 'C:/Users/yarad/Documents/school/Engineer herkansing/Testdata'\r\nCSV_FILENAME = 'test_aardbevingen.csv'\r\nDATABASE_CONNECTION = \"dbname=oefen1 user=postgres password=postgres\"\r\n\r\n# Inlezen csv bestand in database\r\nos.chdir(CSV_FOLDER)\r\n\r\n# Stap 1: Openen bestand\r\ncsv_file = open(CSV_FILENAME, \"r\")\r\n\r\n# Stap 2: Openen database\r\nconn = psycopg2.connect(DATABASE_CONNECTION)\r\ncur = conn.cursor()\r\n\r\n# Stap 3: Ophalen rij uit bestand\r\nline_nr = 0\r\nline = csv_file.readline()\r\nwhile line:\r\n line_nr = line_nr + 1\r\n print(line)\r\n # Stap 4: Ophalen waardes uit rij\r\n \r\n if line_nr > 1 :\r\n values = line.split(';')\r\n \r\n id = values[0]\r\n jaar = values[1].replace('/', '0')\r\n maand = values[2]\r\n dag = values[3]\r\n magnitude = values[4]\r\n if values[6] != '' and values[5] != '' : #Wanneer deze kolommen waardes hebben, maak de x en y floats van deze kollommen. \r\n x = float(values[6])\r\n y = float(values[5])\r\n if maand != '' and len(maand) == 2 and magnitude != '' and dag != '' and x != '' and y != '' : #Checkt of kollommen waardes hebben, dan insert uitvoeren van die kolommen. \r\n cur.execute(\"INSERT INTO public.aardbevingen(id, jaar, maand, dag, geom, magnitude) VALUES (%s, %s, %s, %s, ST_SetSRID(ST_MakePoint(%s, %s),4326), %s )\",(id, jaar, maand, dag, x, y, magnitude ))\r\n # Met len(kolom) == getal bepaal je dat alleen data rijen insert worden die binnen genoemde kolom de lengte hebben van genoemd getal.\r\n # De if kolom != '' zegt dus: gebruik alleen de waardes uit een kolom en negeer lege cellen.\r\n # '' is dus een waarde. 
Als je bijv alleen aardbevingen uit december wilt inserten, gebruik je \"maand != '12'.\r\n # Commit\r\n conn.commit()\r\n # Stap 8: Ophalen volgende rij\r\n line = csv_file.readline()\r\n\r\n# Stap 9: Print feedback\r\n\r\n# Stap 10: Sluiten database\r\ncsv_file.close()\r\nconn.close() ", "sub_path": "Python werkende voorbeeldscripts/csv2dbaardbevingen+replace slash in jaar kolom met een 0.py", "file_name": "csv2dbaardbevingen+replace slash in jaar kolom met een 0.py", "file_ext": "py", "file_size_in_byte": 2326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.chdir", "line_number": 15, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "539546487", "text": "from bookie_app import model\nfrom bookie_app.model import meta\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\n\n# tied table used to associate the Bookmark to the Tag models\nbookmark_tags_table = Table('bookmarks_tags', meta.metadata,\n Column('bookmark_id', Integer, ForeignKey('bookmarks.id')),\n Column('tag_id', Integer, ForeignKey('tags.id')),\n )\n\n\n# This is the association table for the many-to-many relationship between\n# groups and permissions.\ngroup_permission_table = Table('group_permission', meta.Base.metadata,\n Column('group_id', Integer, \n ForeignKey('group.group_id', onupdate=\"CASCADE\", ondelete=\"CASCADE\")),\n Column('permission_id', Integer, \n ForeignKey('permission.permission_id', onupdate=\"CASCADE\", ondelete=\"CASCADE\"))\n)\n\n# This is the association table for the many-to-many relationship between\n# groups and members - this is, the memberships.\nuser_group_table = Table('user_group', meta.Base.metadata,\n Column('user_id', Integer, ForeignKey('user.user_id',\n onupdate=\"CASCADE\", ondelete=\"CASCADE\")),\n Column('group_id', Integer, ForeignKey('group.group_id',\n onupdate=\"CASCADE\", ondelete=\"CASCADE\"))\n)\n", "sub_path": "pylons/bookie/model/tied_tables.py", "file_name": "tied_tables.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sqlalchemy.Table", "line_number": 6, "usage_type": "call"}, {"api_name": "bookie_app.model.meta.metadata", "line_number": 6, "usage_type": "attribute"}, {"api_name": "bookie_app.model.meta", "line_number": 6, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 7, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 8, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 14, "usage_type": "call"}, {"api_name": "bookie_app.model.meta.Base", "line_number": 14, "usage_type": "attribute"}, {"api_name": "bookie_app.model.meta", "line_number": 14, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 17, 
"usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 23, "usage_type": "call"}, {"api_name": "bookie_app.model.meta.Base", "line_number": 23, "usage_type": "attribute"}, {"api_name": "bookie_app.model.meta", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "578123797", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('studentsapp', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Groups',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255, verbose_name='\\u0418\\u043c\\u044f')),\n ('notes', models.TextField(default=b'', verbose_name='\\u0414\\u043e\\u043f\\u043e\\u043b\\u043d\\u0438\\u0442\\u0435\\u043b\\u044c\\u043d\\u0430\\u044f \\u0438\\u043d\\u0444\\u043e\\u0440\\u043c\\u0430\\u0446\\u0438\\u044f', blank=True)),\n ('leader', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='studentsapp.Students', verbose_name='\\u0413\\u0440\\u0443\\u043f\\u043f\\u0430')),\n ],\n options={\n 'verbose_name': '\\u0413\\u0440\\u0443\\u043f\\u043f\\u0430',\n 'verbose_name_plural': '\\u0413\\u0440\\u0443\\u043f\\u043f\\u044b',\n },\n bases=(models.Model,),\n ),\n migrations.AlterModelOptions(\n name='students',\n options={'verbose_name': '\\u0421\\u0442\\u0443\\u0434\\u0435\\u043d\\u0442', 'verbose_name_plural': '\\u0421\\u0442\\u0443\\u0434\\u0435\\u043d\\u0442\\u044b'},\n ),\n migrations.AlterField(\n model_name='students',\n name='birthday',\n field=models.DateField(null=True, verbose_name='\\u0414\\u0430\\u0442\\u0430 \\u0440\\u043e\\u0436\\u0434\\u0435\\u043d\\u0438\\u044f', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='first_name',\n field=models.CharField(max_length=255, verbose_name='\\u0418\\u043c\\u044f'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='last_name',\n field=models.CharField(max_length=255, verbose_name='\\u0424\\u0430\\u043c\\u0438\\u043b\\u0438\\u044f'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='middle_name',\n field=models.CharField(max_length=255, null=True, verbose_name='\\u041e\\u0442\\u0447\\u0435\\u0441\\u0442\\u0432\\u043e', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='notes',\n field=models.TextField(default=b'', verbose_name='\\u0414\\u043e\\u043f\\u043e\\u043b\\u043d\\u0438\\u0442\\u0435\\u043b\\u044c\\u043d\\u0430\\u044f \\u0438\\u043d\\u0444\\u043e\\u0440\\u043c\\u0430\\u0446\\u0438\\u044f', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='photo',\n field=models.ImageField(upload_to=b'', null=True, 
verbose_name='\\u0424\\u043e\\u0442\\u043e', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='students',\n name='ticket',\n field=models.CharField(max_length=255, verbose_name='\\u2116 \\u0411\\u0438\\u043b\\u0435\\u0442'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "studentsapp/migrations/0002_auto_20141205_1718.py", "file_name": "0002_auto_20141205_1718.py", "file_ext": "py", "file_size_in_byte": 3400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.db", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.TextField", 
"line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "34168857", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDatabase connection module. It hides some *complexity* getting connection\nparameters and performing the connection itself.\n\"\"\"\n\nimport os\n\nimport redis\n\nimport config\n\n\ndef connect():\n \"\"\"\n Gains environment values or default for database host and port values and\n connects; it returns the client object.\n\n >>> import database\n >>> db = database.connect()\n \"\"\"\n db = redis.StrictRedis(\n host=os.environ.get(config.REDIS_HOST_KEY, config.REDIS_HOST_DEFAULT),\n port=os.environ.get(config.REDIS_PORT_KEY, config.REDIS_PORT_DEFAULT),\n db=os.environ.get(\n config.REDIS_CONTEXT_KEY, config.REDIS_CONTEXT_DEFAULT\n )\n )\n\n return db\n\n\n# vim: ai ts=4 sts=4 et sw=4 ft=python\n", "sub_path": "src/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "redis.StrictRedis", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.REDIS_HOST_KEY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.REDIS_HOST_DEFAULT", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.REDIS_PORT_KEY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.REDIS_PORT_DEFAULT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "config.REDIS_CONTEXT_KEY", "line_number": 28, "usage_type": "attribute"}, {"api_name": "config.REDIS_CONTEXT_DEFAULT", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "60856628", "text": "# 用于记录默认配置信息\n\nimport logging\n\n# 默认的配置\nDEFAULT_LOG_LEVEL = logging.INFO # 默认等级\nDEFAULT_LOG_FMT = '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s: %(message)s' # 默认日志格式\nDEFUALT_LOG_DATEFMT = '%Y-%m-%d %H:%M:%S' # 默认时间格式\nDEFAULT_LOG_FILENAME = 'log.log' # 默认日志文件名称\n\n\n# 配置信息放到模块配置中: 好处: 1. 
方便写代码 2.为了增加程序健壮性\n\n# 配置开启的爬虫\nSPIDERS = []\n\n# 配置管道\nPIPELINES = []\n\n# 爬虫中间件\nSPIDER_MIDDLEWARES = []\n\n# 下载器中间件\nDOWNLOADER_MIDDLEWARES = []\n\n\n# 配置异步请求的数量\nASYNC_COUNT = 5\n\n# 配置异步类型: thread: 线程池版 , coroutine 协程池版\nASYNC_TYPE = 'thread'\n\n\n# scrapy_plus/conf/default_settings.py\n# 设置调度器的内容是否要持久化\n# 量个值:True和False\n# 如果是True,那么就是使用分布式,就要使用基于Redis队列和去重容器\n# 如果是False, 就不使用分布式, 就使用内存版的队列和去重容器\nSCHEDULER_PERSIST = False\n\n# 是否要开启断点续爬\n# 如果是True, 就表示开启断点续爬; 当程序结束了, 我们保留Redis数据中的请求和指纹数据\n# 如果是False, 就表示关闭断点续爬, 当前程序结束时候, 就清空Redis数据中的请求和指纹数据\nFP_PERSIST = True\n\n# redis默认配置,默认为本机的redis\nREDIS_SET_NAME = 'scrapy_plus_fp_set' # fp集合\nREDIS_QUEUE_NAME = 'scrapy_plus_request_queue' # request队列\nREDIS_HOST = '127.0.0.1'\nREDIS_PORT = 6379\nREDIS_DB = 0", "sub_path": "spider_frame/framework/scrapy_plus/conf/default_settings.py", "file_name": "default_settings.py", "file_ext": "py", "file_size_in_byte": 1572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "7906221", "text": "\"\"\"empty message\n\nRevision ID: 188c6180a5f8\nRevises: cb125447489d\nCreate Date: 2019-01-10 12:34:45.401546\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '188c6180a5f8'\ndown_revision = 'cb125447489d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('instances', sa.Column('locationID', sa.Integer(), nullable=True))\n op.add_column('instances', sa.Column('status', sa.String(length=250), nullable=True))\n op.create_foreign_key(None, 'instances', 'locations', ['locationID'], ['id'], ondelete='CASCADE')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'instances', type_='foreignkey')\n op.drop_column('instances', 'status')\n op.drop_column('instances', 'locationID')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/188c6180a5f8_.py", "file_name": "188c6180a5f8_.py", "file_ext": "py", "file_size_in_byte": 962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "140578564", "text": "import pygame\nimport constants\nimport platforms\nSCREEN_HEIGHT = constants.SCREEN_HEIGHT\nSCREEN_WIDTH = constants.SCREEN_WIDTH\n\n\nclass Landscape(pygame.sprite.Sprite):\n def __init__(self, worm):\n pygame.sprite.Sprite.__init__(self, )\n super().__init__()\n MainImage = None\n self.rect = None\n self.platform_list = pygame.sprite.Group()\n self.worm = worm\n\n def update(self):\n self.platform_list.update()\n\n def draw(self, screen):\n screen.fill(constants.BLACK)\n self.platform_list.draw(screen)\n\n\n\nclass LandScape01(Landscape):\n def __init__(self, worms):\n self.platform_list = pygame.sprite.Group()\n self.worms = worms\n\n level = [[platforms.BIG_ISLAND_RIGHT, 630, 720-432],\n [platforms.BIG_ISLAND_LEFT, 0, 720-529],\n [platforms.SMALL_ISLAND, 988, 135]]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.Platform(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.players = self.worms\n self.platform_list.add(block)\n\n", "sub_path": "Python Projects/Worms The Game/Project/LandScape.py", "file_name": "LandScape.py", "file_ext": "py", "file_size_in_byte": 1175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "constants.SCREEN_HEIGHT", "line_number": 4, "usage_type": "attribute"}, {"api_name": "constants.SCREEN_WIDTH", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 14, "usage_type": "attribute"}, {"api_name": "constants.BLACK", 
"line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 28, "usage_type": "attribute"}, {"api_name": "platforms.BIG_ISLAND_RIGHT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "platforms.BIG_ISLAND_LEFT", "line_number": 32, "usage_type": "attribute"}, {"api_name": "platforms.SMALL_ISLAND", "line_number": 33, "usage_type": "attribute"}, {"api_name": "platforms.Platform", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "36846691", "text": "import argparse\nimport os\nimport pickle\nfrom utils.dictionary import Dictionary\n\nparser = argparse.ArgumentParser()\nparser.add_argument('data', help='text file to make dictionary from')\nparser.add_argument('out', help='path to write dictionary pickle to')\nparser.add_argument('--max_vocab', type=int, default=100000,\n help='max_words in dictionary')\nargs = parser.parse_args()\n\nassert(os.path.exists(args.data))\ndic = Dictionary()\nfreq = {}\nwith open(args.data, 'r') as f:\n for line in f:\n for word in line.split():\n freq[word] = freq.get(word, 0) + 1\nprint('Total words in all data:{:>10d}'.format(len(freq)))\nprint('Vocab allowed size :{:>10d}'.format(args.max_vocab))\nordered_words = sorted([(f, w) for w, f in freq.items()], reverse=True)\nfor _, word in ordered_words[:args.max_vocab]:\n dic.add_word(word)\n\nwith open(args.out, 'wb') as out_file:\n pickle.dump(dic, out_file)\n", "sub_path": "make_dic.py", "file_name": "make_dic.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "utils.dictionary.Dictionary", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "237393973", "text": "#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n\r\nimport requests\r\nimport os\r\nfrom bs4 import BeautifulSoup\r\n\r\nbase_url = 'http://www.4j4j.cn'\r\nindex_url = 'http://www.4j4j.cn/beauty/index.html'\r\n\r\n# 获取每个美女详情页的url\r\ndef get_url_list():\r\n response = requests.get(base_url)\r\n response.encoding = 'utf-8'\r\n html = BeautifulSoup(response.text, 'html.parser')\r\n result =[]\r\n for link in html.find_all('p'):\r\n for a in link.find_all(\"a\"):\r\n result.append([(a.get('href'), a.string)])\r\n print(result)\r\n return result\r\n\r\n# 下载图片保存到本地\r\ndef get_img(beauty_url, title):\r\n save_path = r'D:\\labeling & extracting & downloading tools\\crawler\\pic\\beauty' + \"\\\\\"+title\r\n os.mkdir(save_path)\r\n os.chdir(save_path)\r\n print(os.getcwd())\r\n response = requests.get(beauty_url)\r\n response.encoding = 'utf-8'\r\n html = BeautifulSoup(response.text, 'html.parser')\r\n data = html.find('div', {'class': 'beauty_details_imgs_box'})\r\n girls = data.find_all('img')\r\n i = 1\r\n for girl in girls:\r\n girl_url = girl['src']\r\n res = requests.get(girl_url)\r\n res.encoding = 'utf-8'\r\n if res.status_code == 200:\r\n with open('pic_%d.jpg' % i, 'wb') as fp:\r\n fp.write(res.content)\r\n i += 1\r\n\r\n\r\ndef get_page():\r\n url_list = get_url_list()\r\n print(url_list)\r\n for url in url_list:\r\n print(url)\r\n beauty_url = base_url+url[0][0]\r\n title = url[0][1]\r\n print(beauty_url)\r\n 
print(title)\r\n        get_img(beauty_url=beauty_url, title=title)\r\n\r\nif __name__ == '__main__':\r\n    get_page()\r\n", "sub_path": "beauty.py", "file_name": "beauty.py", "file_ext": "py", "file_size_in_byte": 1662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "643317670", "text": "from collections import MutableMapping\n\n\nclass Mapping(MutableMapping):\n\n    def __init__(self, *args, **kwargs):\n        for key, val in dict(*args, **kwargs).items():\n            self._update(key, val)\n\n    def _update(self, key, value):\n        if hasattr(self, '__schema__'):\n            checked = self.__schema__({key: value}).items()\n            if checked:\n                key, value = checked.pop()\n            else:\n                return\n        if isinstance(value, dict):\n            self.__dict__[key] = Mapping(value)\n        elif isinstance(value, (list, tuple)):\n            self.__dict__[key] = []\n            for v in value:\n                if isinstance(v, dict):\n                    self.__dict__[key].append(Mapping(v))\n                else:\n                    self.__dict__[key].append(v)\n        else:\n            self.__dict__[key] = value\n\n    def __getitem__(self, key):\n        return self.__dict__[key]\n\n    def __setitem__(self, key, value):\n        self._update(key, value)\n\n    def __setattr__(self, key, value):\n        self._update(key, value)\n\n    def __getattr__(self, key):\n        return self.__dict__[key]\n\n    def __delitem__(self, key):\n        del self.__dict__[key]\n\n    def __iter__(self):\n        return iter(self.__dict__)\n\n    def __len__(self):\n        return len(self.__dict__)\n\n    def unwrap(self, d=None):\n        if not d:\n            d = self.__dict__\n        result = {}\n        for k, v in d.items():\n            if isinstance(v, Mapping):\n                result[k] = v.unwrap()\n            elif isinstance(v, (list, tuple)):\n                result[k] = []\n                for item in v:\n                    if isinstance(item, (Mapping, dict)):\n                        result[k].append(item.unwrap())\n                    else:\n                        result[k].append(item)\n            else:\n                result[k] = v\n        return result\n", "sub_path": "ocds/export/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.MutableMapping", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "153808525", "text": "from collections import deque\n\n\nclass TreeNode:\n    def __init__(self, v, parent=None):\n        self.val = v\n        self.left = None\n        self.right = None\n        self.parent = parent\n\n    def insert(self, v):\n        if v > self.val:\n            if self.right is not None:\n                self.right.insert(v)\n            else:\n                self.right = TreeNode(v, self)\n        elif v <= self.val:\n            if self.left is not None:\n                self.left.insert(v)\n            else:\n                self.left = TreeNode(v, self)\n\n    def print_inorder_traversal(self):\n        if self.left is not None:\n            self.left.print_inorder_traversal()\n        if self.parent:\n            print [self.parent.val, self.val]\n        else:\n            print [None, self.val]\n        if self.right is not None:\n            self.right.print_inorder_traversal()\n\n    def print_bfs(self):\n        q = deque()\n        q.append(self)\n\n        while len(q) > 0:\n            currnode = q.popleft()\n            if currnode.parent:\n                print [currnode.parent.val, currnode.val]\n            
else:\n print [None, currnode.val]\n # print currnode.val\n if currnode.left is not None:\n q.append(currnode.left)\n if currnode.right is not None:\n q.append(currnode.right)\n\n def __str__(self):\n return str(self.val)\n", "sub_path": "practice/treenode.py", "file_name": "treenode.py", "file_ext": "py", "file_size_in_byte": 1410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "495426883", "text": "# stdlib\nimport mock\n\n# 3p\nfrom dns.resolver import Resolver, Timeout, NXDOMAIN\n\n# project\nfrom tests.checks.common import AgentCheckTest\nfrom checks import AgentCheck\n\n\nMETRICS = [\n 'dns.response_time'\n]\n\nSERVICE_CHECK_NAME = 'dns.can_resolve'\n\n\nclass MockDNSAnswer:\n def __init__(self, address):\n self.rrset = MockDNSAnswer.MockRrset(address)\n\n class MockRrset:\n def __init__(self, address):\n self.items = [MockDNSAnswer.MockItem()]\n\n class MockItem:\n def __getattr__(self, name):\n return '127.0.0.1'\n\n\ndef success_query_mock(d_name):\n return MockDNSAnswer('127.0.0.1')\n\n\ndef timeout_query_mock(d_name):\n raise Timeout()\n\n\nclass TestDns(AgentCheckTest):\n CHECK_NAME = 'dns_check'\n\n @mock.patch.object(Resolver, 'query', side_effect=success_query_mock)\n def test_success(self, mocked_query):\n config = {\n 'instances': [{'hostname': 'www.example.org', 'nameserver': '127.0.0.1'}]\n }\n self.run_check(config)\n self.assertMetric('dns.response_time', count=1,\n tags=['nameserver:127.0.0.1', 'resolved_hostname:www.example.org'])\n self.assertServiceCheck(SERVICE_CHECK_NAME, status=AgentCheck.OK,\n tags=['resolved_hostname:www.example.org', 'nameserver:127.0.0.1'])\n self.coverage_report()\n\n @mock.patch.object(Resolver, 'query', side_effect=Timeout())\n def test_timeout(self, mocked_query):\n configs = [\n # short default timeout\n {'init_config': {'default_timeout': 0.1},\n 'instances': [{'hostname': 'www.example.org', 'nameserver': '127.0.0.1'}]},\n # short timeout\n {'instances': [{'hostname': 'www.example.org', 'timeout': 0.1, 'nameserver': '127.0.0.1'}]},\n ]\n for config in configs:\n self.assertRaises(\n Timeout,\n lambda: self.run_check(config)\n )\n self.assertEquals(len(self.metrics), 0)\n self.assertServiceCheck(SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL,\n tags=['resolved_hostname:www.example.org', 'nameserver:127.0.0.1'])\n self.coverage_report()\n\n def test_invalid_config(self):\n configs = [\n # invalid hostname\n {'instances': [{'hostname': 'example'}]},\n # invalid nameserver\n {'instances': [{'hostname': 'www.example.org', 'nameserver': '0.0.0.0'}]}\n ]\n for config in configs:\n self.assertRaises(NXDOMAIN, lambda: self.run_check(config))\n self.assertEquals(len(self.metrics), 0)\n self.assertServiceCheck(SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL)\n self.coverage_report()\n", "sub_path": "tests/checks/mock/test_dns_check.py", "file_name": "test_dns_check.py", "file_ext": "py", "file_size_in_byte": 2781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "dns.resolver.Timeout", "line_number": 37, "usage_type": "call"}, {"api_name": "tests.checks.common.AgentCheckTest", "line_number": 40, "usage_type": "name"}, {"api_name": "checks.AgentCheck.OK", "line_number": 51, "usage_type": "attribute"}, {"api_name": "checks.AgentCheck", "line_number": 51, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 
43, "usage_type": "call"}, {"api_name": "dns.resolver.Resolver", "line_number": 43, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dns.resolver.Timeout", "line_number": 66, "usage_type": "argument"}, {"api_name": "checks.AgentCheck.CRITICAL", "line_number": 70, "usage_type": "attribute"}, {"api_name": "checks.AgentCheck", "line_number": 70, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 55, "usage_type": "call"}, {"api_name": "dns.resolver.Resolver", "line_number": 55, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 55, "usage_type": "attribute"}, {"api_name": "dns.resolver.Timeout", "line_number": 55, "usage_type": "call"}, {"api_name": "dns.resolver.NXDOMAIN", "line_number": 82, "usage_type": "argument"}, {"api_name": "checks.AgentCheck.CRITICAL", "line_number": 84, "usage_type": "attribute"}, {"api_name": "checks.AgentCheck", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "638911402", "text": "import json\nfrom torch.utils.data import Dataset\nimport os\nimport numpy as np\nimport argparse\nfrom config_parser import create_config\nfrom tools.standard_scaler import StandardScaler\n\n\nclass METRDataset(Dataset):\n def __init__(self, config, mode, *args, **params):\n self.config = config\n self.mode = mode\n self.data_path = config.get(\"data\", \"%s_data_path\" % mode)\n \n self.file_list = config.get(\"data\", \"%s_file_list\" % mode).split(' ')\n self.data_list = {}\n self.load_mem = config.getboolean(\"data\", \"load_into_mem\")\n self.scaler_path = config.get(\"data\", \"scaler_path\")\n\n if self.load_mem:\n for filename in self.file_list:\n cat_data = np.load(os.path.join(self.data_path, filename))\n print(\"x.shape\", cat_data['x'].shape, \"y.shape\", cat_data['y'].shape)\n self.data_list['x'] = cat_data['x']\n self.data_list['y'] = cat_data['y']\n if self.mode == \"train\":\n mean_std = np.array([self.data_list['x'][..., 0].mean(), self.data_list['x'][..., 0].std()])\n print(\"mean and std of training set:\", mean_std)\n np.save(self.scaler_path, mean_std)\n\n\n def __getitem__(self, item):\n if self.load_mem:\n return {\n \"x\": self.data_list[\"x\"][item],\n \"y\": self.data_list[\"y\"][item]\n }\n else:\n return {\n # \"data\": cv2.imread(os.path.join(self.prefix, self.data_list[item][\"path\"])),\n # \"label\": self.data_list[item][\"label\"]\n }\n\n def __len__(self):\n return len(self.data_list)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', '-c', help=\"specific config file\", required=True)\n parser.add_argument('--gpu', '-g', help=\"gpu id list\")\n parser.add_argument('--checkpoint', help=\"checkpoint file path\")\n args = parser.parse_args()\n\n configFilePath = args.config\n config = create_config(configFilePath)\n mode = \"train\"\n\n x = METRDataset(config, mode)", "sub_path": "dataset/others/METR.py", "file_name": "METR.py", "file_ext": "py", "file_size_in_byte": 2125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 30, 
"usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 50, "usage_type": "call"}, {"api_name": "config_parser.create_config", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "472554958", "text": "#! /usr/bin/env python\n\n\"\"\"\nThis script produces the stacks for emission line luminosity limited samples.\n\"\"\"\nimport sys\nimport os \nfrom os.path import join\nimport glob\nimport numpy as n\nimport SpectraStackingEBOSS as sse\nimport astropy.io.fits as fits\n\nspec_dir = join(os.environ['HOME'],\"SDSS/stacks\")\nspecList = join(spec_dir, \"eboss-elg_0.2_z_1.5.asc\") \noutfile = join(spec_dir, os.path.basename(specList)[:-4]+\".specMatrix\")\nstack=sse.SpectraStackingEBOSS(specList, outfile)\n\ndef getSpectra(path_to_spectrum):\n\thdulist = fits.open(path_to_spectrum)\n\twave = 10**hdulist[1].data['loglam']\n\tok=(wave>3740)&(wave<9604)\n\tflux = hdulist[1].data['flux']\n\thdulist.close()\n\treturn wave[ok], flux[ok]\n\nfor IDX_j in n.arange(0, len(stack.plates), 4096):\n\tIDX_min=IDX_j\n\tIDX_max=IDX_j+4096\n\tIDX_str=str(IDX_min).zfill(6)+'-'+str(IDX_max).zfill(6)\n\tsamp_plates, samp_mjds, samp_fiberids, samp_redshifts = stack.plates[IDX_min:IDX_max], stack.mjds[IDX_min:IDX_max], stack.fiberids[IDX_min:IDX_max], stack.redshifts[IDX_min:IDX_max]\n\n\tFLUXES = n.zeros((samp_plates.shape[0], 4096))\n\tdata = []\n\tbad_ids = []\n\tfor jj, (plate, mjd, fiber, redshift) in enumerate(zip( samp_plates, samp_mjds, samp_fiberids, samp_redshifts )):\n\t\tpath_to_spectrum = sse.get_path_to_spectrum_v5_11_0(plate, mjd, fiber)\n\t\tif os.path.isfile(path_to_spectrum):\n\t\t\twl,fl=getSpectra(path_to_spectrum)\n\t\t\tdata.append([fl.shape[0], wl.min(), wl.max()])\n\t\t\tif fl.shape[0]==4096:\n\t\t\t\tFLUXES[jj]=fl\n\t\t\t\twavelength=wl\n\n\tn.savetxt(stack.out_file+'.'+IDX_str+'.dat', FLUXES)\n\tn.savetxt(stack.out_file+'.wavelength.'+IDX_str+'.dat', wavelength)\n\tn.savetxt(stack.out_file+'.shapes.'+IDX_str+'.dat', n.array(data) )\n\tn.savetxt(stack.out_file+'.list.'+IDX_str+'.dat', n.array([samp_plates, samp_mjds, samp_fiberids, samp_redshifts]) )\n\n", "sub_path": "galaxy/bin_eBOSS_ELG/stack_spectra_ELG_all.py", "file_name": "stack_spectra_ELG_all.py", "file_ext": "py", "file_size_in_byte": 1785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "SpectraStackingEBOSS.SpectraStackingEBOSS", "line_number": 17, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 20, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "SpectraStackingEBOSS.get_path_to_spectrum_v5_11_0", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.savetxt", 
"line_number": 46, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "136451374", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.distributions.categorical import Categorical\n\nfrom .baseRNN import BaseRNN\nfrom machine.util.gumbel import gumbel_softmax\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Sender(BaseRNN):\n\t\"\"\"\n\t\tApplies a rnn cell to an input sequence and uses Gumbel softmax sampling to\n\t\tgenerate a output sequence.\n\n\t\tArgs:\n\t\t\tvocab_size (int): size of the vocabulary\n\t\t\toutput_len (int): the length of the sequence to be generated\n\t\t\tembedding_size (int): the size of the embedding of input variables\n\t\t\thidden_size (int): the size of the hidden dimension of the rnn\n\t\t\tsos_id (int): index of the start of sequence symbol\n\t\t\teos_id (int): index of the end of sequence symbol\n\t\t\trnn_cell (str, optional): type of RNN cell (default: gru)\n\t\t\tgreedy (bool, optional): True if use argmax at prediction time, False if sample (default: False)\n\t\t\tcompute_lengths (bool, optional): True if the length of each sequence in the batch is to be computed \n\t\t\tby looking for eos tokens.\n\n\t\tInputs:\n\t\t\ttau (float): Temperature to be used for Gumbel Softmax.\n\t\t\thidden_state (torch.tensor, optional): The hidden state to start the decoding. (default=None)\n\t\t\tShape [batch_size, hidden_size]. If None, batch_size=1.\n\n\t\tOutputs:\n\t\t\toutput_sequence (torch.tensor): The generated decoded sequences. Shape [batch_size, output_len+1]\n\t\t\tE.g. of a sequence at prediction time [sos_id, predicted_1, predicted_2,...., predicted_outputlen]\n\t\t\tsequence_lengths (torch.tensor): The lengths of all the sequences in the batch. 
Shape [batch_size]\n\t\t\tOnly returned if compute_lenghts=True\n\n\t\"\"\"\n\n\tdef __init__(self, vocab_size, output_len, embedding_size, hidden_size,\n\t\t\t\t sos_id, eos_id, rnn_cell='gru', greedy=False, compute_lengths=False):\n\t\tsuper().__init__(vocab_size, output_len, hidden_size,\n\t\t\t\t\t\t input_dropout_p=0, dropout_p=0,\n\t\t\t\t\t\t n_layers=1, rnn_cell=rnn_cell, relaxed=True)\n\n\t\tself.output_len = output_len\n\t\tself.embedding_size = embedding_size\n\t\tself.sos_id = sos_id\n\t\tself.eos_id = eos_id\n\t\tself.greedy = greedy\n\t\tself.compute_lengths = compute_lengths\n\n\t\tself.embedding = nn.Parameter(torch.empty((self.vocab_size, self.embedding_size), dtype=torch.float32))\n\t\tself.rnn = self.rnn_cell(self.embedding_size, self.hidden_size)\n\t\tself.linear_out = nn.Linear(self.hidden_size, self.vocab_size)\n\n\t\tself._reset_parameters()\n\n\tdef _reset_parameters(self):\n\t\tnn.init.normal_(self.embedding, 0.0, 0.1)\n\n\t\tnn.init.constant_(self.linear_out.weight, 0)\n\t\tnn.init.constant_(self.linear_out.bias, 0)\n\n\t\tnn.init.xavier_uniform_(self.rnn.weight_ih)\n\t\tnn.init.orthogonal_(self.rnn.weight_hh)\n\t\tnn.init.constant_(self.rnn.bias_ih, val=0)\n\t\t# # cuDNN bias order: https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnRNNMode_t\n\t\t# # add some positive bias for the forget gates [b_i, b_f, b_o, b_g] = [0, 1, 0, 0]\n\t\tnn.init.constant_(self.rnn.bias_hh, val=0)\n\t\tnn.init.constant_(self.rnn.bias_hh[self.hidden_size:2 * self.hidden_size], val=1)\n\n\tdef _init_state(self, hidden_state, rnn_type):\n\t\t\"\"\"\n\t\t\tHandles the initialization of the first hidden state of the decoder.\n\t\t\tHidden state + cell state in the case of an LSTM cell or\n\t\t\tonly hidden state in the case of a GRU cell.\n\n\t\t\tArgs:\n\t\t\t hidden_state (torch.tensor): The state to initialize the decoding with.\n\t\t\t rnn_type (type): Type of the rnn cell.\n\n\t\t\tReturns:\n\t\t\t state: (h, c) if LSTM cell, h if GRU cell\n\t\t\t batch_size: Based on the given hidden_state if not None, 1 otherwise\n\t\t\"\"\"\n\n\t\t# h0\n\t\tif hidden_state is None:\n\t\t\tbatch_size = 1\n\t\t\th = torch.zeros([batch_size, self.hidden_size], device=device)\n\t\telse:\n\t\t\tbatch_size = hidden_state.shape[0]\n\t\t\th = hidden_state # batch_size, hidden_size\n\t\t\n\t\t# c0\n\t\tif rnn_type is nn.LSTMCell:\n\t\t\tc = torch.zeros([batch_size, self.hidden_size], device=device)\n\n\t\t\tstate = (h,c)\n\t\telse:\n\t\t\tstate = h\n\n\t\treturn state, batch_size\n\n\tdef _calculate_seq_len(self, seq_lengths, token, initial_length, seq_pos, \n\t\tn_sos_symbols, is_discrete):\n\t\t\"\"\"\n\t\t\tCalculates the lengths of each sequence in the batch in-place.\n\t\t\tThe length goes from the start of the sequece up until the eos_id is predicted.\n\t\t\tIf it is not predicted, then the length is output_len + n_sos_symbols.\n\n\t\t\tArgs:\n\t\t\t seq_lengths (torch.tensor): To keep track of the sequence lengths.\n\t\t\t token (torch.tensor): Batch of predicted tokens at this timestep.\n\t\t\t initial_length (int): The max possible sequence length (output_len + n_sos_symbols).\n\t\t\t seq_pos (int): The current timestep.\n\t\t\t n_sos_symbols (int): Number of sos symbols at the beginning of the sequence.\n\t\t\t is_discrete (bool): True if Gumbel Softmax is used, False otherwise.\n\t\t\t \n\t\t\"\"\"\n\t\n\t\tfor idx, elem in enumerate(token): \n\t\t\tif seq_lengths[idx] == initial_length:\n\t\t\t\tif (is_discrete and elem == self.eos_id) or (not is_discrete and 
elem[self.eos_id] == 1.0):\n\t\t\t\t\tseq_lengths[idx] = seq_pos + n_sos_symbols\n\n\tdef forward(self, tau, hidden_state=None):\n\t\t\"\"\"\n\t\tPerforms a forward pass. If training, use Gumbel Softmax (hard) for sampling, else use \n\t\tdiscrete sampling.\n\t\t\"\"\"\n\n\t\tstate, batch_size = self._init_state(hidden_state, type(self.rnn))\n\n\t\t# Init output\n\t\tif self.training:\n\t\t\toutput = [torch.zeros((batch_size, self.vocab_size), dtype=torch.float32, device=device)]\n\t\t\toutput[0][:, self.sos_id] = 1.0\n\t\telse:\n\t\t\toutput = [torch.full((batch_size, ), fill_value=self.sos_id, dtype=torch.int64, device=device)]\n\n\t\t# Keep track of sequence lengths\n\t\tif self.compute_lengths:\n\t\t\tn_sos_symbols = 1\n\t\t\tinitial_length = self.output_len + n_sos_symbols\n\t\t\tseq_lengths = torch.ones([batch_size], dtype=torch.int64, device=device) * initial_length\n\n\t\tfor i in range(self.output_len):\n\t\t\tif self.training:\n\t\t\t\temb = torch.matmul(output[-1], self.embedding)\n\t\t\telse:\n\t\t\t\temb = self.embedding[output[-1]]\n\n\t\t\tstate = self.rnn(emb, state)\n\n\t\t\tif type(self.rnn) is nn.LSTMCell:\n\t\t\t\th, c = state\n\t\t\telse:\n\t\t\t\th = state\n\t\t\t\n\t\t\tp = F.softmax(self.linear_out(h), dim=1)\n\n\t\t\tif self.training:\n\t\t\t\ttoken = gumbel_softmax(p, tau, hard=True)\n\t\t\telse:\n\t\t\t\tif self.greedy:\n\t\t\t\t\t_, token = torch.max(p, -1)\n\t\t\t\telse:\n\t\t\t\t\ttoken = Categorical(p).sample()\n\n\t\t\t\tif batch_size == 1:\n\t\t\t\t\ttoken = token.unsqueeze(0)\n\n\t\t\toutput.append(token)\n\n\t\t\tif self.compute_lengths:\n\t\t\t\tself._calculate_seq_len(\n\t\t\t\t\tseq_lengths, token, initial_length, seq_pos=i+1, \n\t\t\t\t\tn_sos_symbols=n_sos_symbols, is_discrete=not self.training)\n\n\n\t\toutputs = torch.stack(output, dim=1)\n\n\t\tif self.compute_lengths:\n\t\t\treturn (outputs, seq_lengths)\n\t\telse:\n\t\t\treturn outputs", "sub_path": "machine/models/Sender.py", "file_name": "Sender.py", "file_ext": "py", "file_size_in_byte": 6577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 9, "usage_type": "attribute"}, {"api_name": "baseRNN.BaseRNN", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": 
"torch.nn.init.xavier_uniform_", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.LSTMCell", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.matmul", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn.LSTMCell", "line_number": 159, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 164, "usage_type": "name"}, {"api_name": "machine.util.gumbel.gumbel_softmax", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.distributions.categorical.Categorical", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "576563162", "text": "import sys\nimport pathlib\nimport matplotlib.pyplot as plt\n\nimport alpha_vantage\nimport plot_style\n\n\ndef show_history(symbol, interval='MONTHLY'):\n data = alpha_vantage.get_stock_price_history(\n symbol,\n interval,\n adjusted=False\n )\n\n plot_style.line()\n plt.title(f'{symbol} price history')\n plt.plot(list(data.keys()), list(data.values()))\n\n pathlib.Path('img/history').mkdir(parents=True, exist_ok=True)\n plt.savefig(f'img/history/{symbol}.png')\n plt.close()\n\n\nshow_history(sys.argv[1])\n", "sub_path": "history.py", "file_name": "history.py", "file_ext": "py", "file_size_in_byte": 533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "alpha_vantage.get_stock_price_history", "line_number": 10, "usage_type": "call"}, {"api_name": "plot_style.line", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "260244251", "text": "import keras.backend\nimport keras_retinanet.backend\n\nimport numpy as np\n\n\ndef bbox_transform_inv(boxes, deltas):\n boxes = keras.backend.reshape(boxes, (-1, 4))\n deltas = keras.backend.reshape(deltas, (-1, 4))\n\n widths = boxes[:, 2] - boxes[:, 0]\n heights = boxes[:, 3] - boxes[:, 1]\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n dx = deltas[:, 0]\n dy = deltas[:, 1]\n dw = deltas[:, 2]\n dh = deltas[:, 3]\n\n pred_ctr_x = dx * widths + ctr_x\n pred_ctr_y = dy * heights + ctr_y\n pred_w = keras.backend.exp(dw) * widths\n pred_h = keras.backend.exp(dh) * heights\n\n pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w\n pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h\n pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w\n pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h\n\n pred_boxes = keras.backend.stack([pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2], axis=1)\n pred_boxes = keras.backend.expand_dims(pred_boxes, axis=0)\n\n return pred_boxes\n\n\ndef shift(shape, stride, anchors):\n \"\"\"\n Produce shifted anchors based on shape of the map and stride size\n \"\"\"\n shift_x = (keras.backend.arange(0, shape[1], dtype=keras.backend.floatx()) + keras.backend.constant(0.5, dtype=keras.backend.floatx())) * stride\n shift_y = (keras.backend.arange(0, shape[0], dtype=keras.backend.floatx()) + keras.backend.constant(0.5, dtype=keras.backend.floatx())) * stride\n\n shift_x, shift_y = keras_retinanet.backend.meshgrid(shift_x, shift_y)\n shift_x = keras.backend.reshape(shift_x, [-1])\n shift_y = keras.backend.reshape(shift_y, [-1])\n\n shifts = keras.backend.stack([\n shift_x,\n shift_y,\n shift_x,\n shift_y\n ], axis=0)\n\n shifts = keras.backend.transpose(shifts)\n number_of_anchors = keras.backend.shape(anchors)[0]\n\n k = keras.backend.shape(shifts)[0] # number of base points = feat_h * feat_w\n\n shifted_anchors = keras.backend.reshape(anchors, [1, number_of_anchors, 4]) + keras.backend.cast(keras.backend.reshape(shifts, [k, 1, 4]), keras.backend.floatx())\n shifted_anchors = keras.backend.reshape(shifted_anchors, [k * number_of_anchors, 4])\n\n return shifted_anchors\n\n\ndef anchors(base_size, ratios, scales):\n \"\"\"\n Generates a regular grid of multi-aspect and multi-scale anchor boxes.\n \"\"\"\n base_anchor = keras.backend.cast([1, 1, base_size, base_size], keras.backend.floatx()) - 1\n base_anchor = keras.backend.expand_dims(base_anchor, 0)\n\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = _scale_enum(ratio_anchors, scales)\n\n return anchors\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n col1 = keras.backend.reshape(x_ctr - 0.5 * (ws - 1), (-1, 1))\n col2 = 
keras.backend.reshape(y_ctr - 0.5 * (hs - 1), (-1, 1))\n col3 = keras.backend.reshape(x_ctr + 0.5 * (ws - 1), (-1, 1))\n col4 = keras.backend.reshape(y_ctr + 0.5 * (hs - 1), (-1, 1))\n anchors = keras.backend.concatenate((col1, col2, col3, col4), axis=1)\n\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = keras.backend.round(keras.backend.sqrt(size_ratios))\n hs = keras.backend.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = keras.backend.expand_dims(w, 1) * scales\n hs = keras.backend.expand_dims(h, 1) * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n w = anchor[:, 2] - anchor[:, 0] + 1\n h = anchor[:, 3] - anchor[:, 1] + 1\n x_ctr = anchor[:, 0] + 0.5 * (w - 1)\n y_ctr = anchor[:, 1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n", "sub_path": "keras_retinanet/backend/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 4169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "keras.backend.backend.reshape", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 8, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 8, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 9, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 9, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 9, "usage_type": "name"}, {"api_name": "keras.backend.backend.exp", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 23, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 23, "usage_type": "name"}, {"api_name": "keras.backend.backend.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 24, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 24, "usage_type": "name"}, {"api_name": "keras.backend.backend.stack", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 31, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.backend.backend.expand_dims", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 32, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 32, "usage_type": "name"}, {"api_name": "keras.backend.backend.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 41, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 41, "usage_type": "name"}, {"api_name": "keras.backend.backend.floatx", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.backend.backend.constant", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.backend.backend.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 42, "usage_type": "attribute"}, {"api_name": 
"keras.backend", "line_number": 42, "usage_type": "name"}, {"api_name": "keras.backend.backend.floatx", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend.backend.constant", "line_number": 42, "usage_type": "call"}, {"api_name": "keras_retinanet.backend.backend.meshgrid", "line_number": 44, "usage_type": "call"}, {"api_name": "keras_retinanet.backend.backend", "line_number": 44, "usage_type": "attribute"}, {"api_name": "keras_retinanet.backend", "line_number": 44, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 45, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 45, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 46, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 46, "usage_type": "name"}, {"api_name": "keras.backend.backend.stack", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 48, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 48, "usage_type": "name"}, {"api_name": "keras.backend.backend.transpose", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 55, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 55, "usage_type": "name"}, {"api_name": "keras.backend.backend.shape", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 56, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 56, "usage_type": "name"}, {"api_name": "keras.backend.backend.shape", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 58, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 58, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 60, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 60, "usage_type": "name"}, {"api_name": "keras.backend.backend.cast", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.backend.backend.floatx", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.backend.backend.reshape", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 61, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 61, "usage_type": "name"}, {"api_name": "keras.backend.backend.cast", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 70, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 70, "usage_type": "name"}, {"api_name": "keras.backend.backend.floatx", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.backend.backend.expand_dims", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 71, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 71, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 85, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 85, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 86, "usage_type": "call"}, 
{"api_name": "keras.backend.backend", "line_number": 86, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 86, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 87, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 87, "usage_type": "name"}, {"api_name": "keras.backend.backend.reshape", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 88, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 88, "usage_type": "name"}, {"api_name": "keras.backend.backend.concatenate", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 89, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 89, "usage_type": "name"}, {"api_name": "keras.backend.backend.round", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 101, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 101, "usage_type": "name"}, {"api_name": "keras.backend.backend.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.backend.backend.round", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 102, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 102, "usage_type": "name"}, {"api_name": "keras.backend.backend.expand_dims", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 113, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 113, "usage_type": "name"}, {"api_name": "keras.backend.backend.expand_dims", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.backend.backend", "line_number": 114, "usage_type": "attribute"}, {"api_name": "keras.backend", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "411703694", "text": "import os\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom whoosh.fields import Schema, TEXT, KEYWORD, NUMERIC, DATETIME\nfrom whoosh.index import create_in \nfrom base.models import Page\nimport logging\n\nlogger = logging.getLogger(\"vincis.debug.log\")\n\n\nclass Command(BaseCommand):\n args = \"nothing\"\n help = \"index vincis\"\n \n def handle(self, *args, **kwargs):\n \"\"\" Creates the index iterating over all the pages of the site \"\"\"\n schema = Schema(pk=NUMERIC(unique=True, stored=True),\n title=TEXT, \n summary=TEXT, \n tags=KEYWORD(commas=True, scorable=True),\n pub_date=DATETIME(sortable=True))\n \n if not os.path.exists(settings.INDEX):\n os.mkdir(settings.INDEX)\n \n ix = create_in(settings.INDEX, schema)\n writer = ix.writer()\n objects = Page.objects.all()\n for object in objects:\n tags = map(lambda x: x.title, object.tags.all())\n writer.add_document(title=object.title, \n summary=object.summary, \n tags=\",\".join(tags),\n pk=object.pk,\n pub_date=object.pub_date)\n writer.commit()", "sub_path": "search/management/commands/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 12, "usage_type": "name"}, {"api_name": "whoosh.fields.Schema", "line_number": 18, "usage_type": 
"call"}, {"api_name": "whoosh.fields.NUMERIC", "line_number": 18, "usage_type": "call"}, {"api_name": "whoosh.fields.TEXT", "line_number": 19, "usage_type": "name"}, {"api_name": "whoosh.fields.TEXT", "line_number": 20, "usage_type": "name"}, {"api_name": "whoosh.fields.KEYWORD", "line_number": 21, "usage_type": "call"}, {"api_name": "whoosh.fields.DATETIME", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings.INDEX", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.settings.INDEX", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "whoosh.index.create_in", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.INDEX", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "base.models.Page.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "base.models.Page.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "base.models.Page", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "313200095", "text": "from spyne.decorator import rpc \nfrom spyne.model.complex import Iterable\nfrom spyne.model.primitive import Unicode, Boolean, Integer\nfrom spyne.service import ServiceBase\nfrom datetime import datetime\nfrom model import Event, Section\n\nimport requests\n\nclass EventCreationService(ServiceBase):\n @rpc(Event, Iterable(Section), _returns=Boolean)\n def CreateEvent(ctx,event,section_list):\n url = ctx.udc.ticket_url;\n\n # if (event.start_at < event.end_at):\n # return False\n \n create_event_payload = {\n 'event_name': event.name, \n 'partner_id':event.partner_id, \n 'start_at': event.start_at, \n 'end_at': event.end_at, \n 'description': event.description, \n 'location': event.location\n }\n\n print(\"Event: \" , create_event_payload)\n create_event_resp = requests.post(url+'/event', json = create_event_payload)\n \n if (create_event_resp.ok):\n # Tolong request nya ngereturn id yang abis diinsert ya\n create_event_json = create_event_resp.json()\n event_id = create_event_json[\"id\"]\n create_ticket_payload = {\"event_id\": event_id, \"section_list\": []}\n \n for section in section_list:\n create_ticket_payload[\"section_list\"].append(\n {\n 'name': section.name, \n 'capacity': section.capacity, \n 'price': section.price, \n 'has_seat': section.has_seat\n })\n\n print(\"Section list: \" , create_ticket_payload)\n\n create_ticket_resp = requests.post(url+'/section', json = create_ticket_payload)\n if (create_ticket_request.ok):\n return True\n else:\n return False\n \n else:\n return False\n\n \n\n\n", "sub_path": "service/EventCreationService.py", "file_name": "EventCreationService.py", "file_ext": "py", "file_size_in_byte": 1680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "spyne.service.ServiceBase", "line_number": 10, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "spyne.decorator.rpc", "line_number": 11, 
"usage_type": "call"}, {"api_name": "model.Event", "line_number": 11, "usage_type": "argument"}, {"api_name": "spyne.model.complex.Iterable", "line_number": 11, "usage_type": "call"}, {"api_name": "model.Section", "line_number": 11, "usage_type": "argument"}, {"api_name": "spyne.model.primitive.Boolean", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "579457594", "text": "__author__ = 'Max'\n\nimport Wizard.moddevwiz\nfrom ArtieEditor.RobotConfigGUI.robconfiggui import *\nfrom ArtieEditor.aemain import *\nfrom ArtieEditor.PartConfigGUI.partconfiggui import *\n\n\ndef run_wizard(parent):\n \"\"\"\n Runs the device configuration wizard.\n :rtype: void\n :return: void\n \"\"\"\n app = wx.App()\n frame = Wizard.moddevwiz.ModDevWiz(parent)\n app.MainLoop()\n\n\n###########################################################################\n## Python code generated with wxFormBuilder (version Jun 5 2014)\n## http://www.wxformbuilder.org/\n##\n## PLEASE DO \"NOT\" EDIT THIS FILE!\n###########################################################################\n\nimport wx\nimport wx.xrc\n\n###########################################################################\n## Class MainGUI\n###########################################################################\n\n\nclass MainGUI ( wx.Frame ):\n def __init__( self, parent ):\n wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 752,552 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )\n\n self.Bind(wx.EVT_CLOSE, self.cancel)\n\n self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )\n\n bSizer1 = wx.BoxSizer( wx.VERTICAL )\n\n self.m_panel1 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )\n bSizer2 = wx.BoxSizer( wx.VERTICAL )\n\n self.m_panel2 = wx.Panel( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )\n bSizer3 = wx.BoxSizer( wx.VERTICAL )\n\n m_radioBox1Choices = [ u\"Robot Configuration Wizard - make/modify robots\", \\\n u\"Device Wizard - add/modify devices like servos\", \\\n u\"Robot Part Wizard - add/modify parts for your robot\", u\"Artie Editor - build your robot's code\"]\n self.m_radioBox1 = wx.RadioBox( self.m_panel2, wx.ID_ANY, u\"wxRadioBox\", wx.DefaultPosition, wx.DefaultSize, m_radioBox1Choices, 1, wx.RA_SPECIFY_COLS )\n self.m_radioBox1.SetSelection( 2 )\n bSizer3.Add( self.m_radioBox1, 1, wx.ALL|wx.EXPAND, 5 )\n\n\n self.m_panel2.SetSizer( bSizer3 )\n self.m_panel2.Layout()\n bSizer3.Fit( self.m_panel2 )\n bSizer2.Add( self.m_panel2, 5, wx.EXPAND |wx.ALL, 5 )\n\n self.m_panel3 = wx.Panel( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )\n bSizer4 = wx.BoxSizer( wx.HORIZONTAL )\n\n self.m_button_cancel = wx.Button( self.m_panel3, wx.ID_ANY, u\"Cancel\", wx.DefaultPosition, wx.DefaultSize, 0 )\n bSizer4.Add( self.m_button_cancel, 0, wx.ALL, 5 )\n\n self.m_button_launch = wx.Button( self.m_panel3, wx.ID_ANY, u\"Launch\", wx.DefaultPosition, wx.DefaultSize, 0 )\n bSizer4.Add( self.m_button_launch, 0, wx.ALL, 5 )\n\n\n self.m_panel3.SetSizer( bSizer4 )\n self.m_panel3.Layout()\n bSizer4.Fit( self.m_panel3 )\n bSizer2.Add( self.m_panel3, 1, wx.EXPAND |wx.ALL, 5 )\n\n\n self.m_panel1.SetSizer( bSizer2 )\n self.m_panel1.Layout()\n bSizer2.Fit( self.m_panel1 )\n bSizer1.Add( self.m_panel1, 1, wx.EXPAND |wx.ALL, 5 )\n\n\n self.SetSizer( bSizer1 )\n self.Layout()\n\n self.Centre( wx.BOTH )\n\n # Connect Events\n self.m_button_cancel.Bind( wx.EVT_BUTTON, self.cancel )\n 
self.m_button_launch.Bind( wx.EVT_BUTTON, self.launch )\n\n self.Show()\n\n def __del__( self ):\n pass\n\n def cancel( self, event ):\n exit(0)\n\n def launch( self, event ):\n choice = self.m_radioBox1.GetSelection()\n if choice == 0:\n run_robotconfig_gui(self)\n elif choice == 1:\n run_wizard(self)\n elif choice == 2:\n run_partconfig_gui(self)\n else:\n run_artie_editor(self)\n\n\nif __name__ == '__main__':\n app = wx.App()\n frame = MainGUI(None)\n app.MainLoop()\n", "sub_path": "AFMain.py", "file_name": "AFMain.py", "file_ext": "py", "file_size_in_byte": 3955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "Wizard.moddevwiz.moddevwiz.ModDevWiz", "line_number": 16, "usage_type": "call"}, {"api_name": "Wizard.moddevwiz.moddevwiz", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Wizard.moddevwiz", "line_number": 16, "usage_type": "name"}, {"api_name": "wx.Frame", "line_number": 35, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 37, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 37, "usage_type": "call"}, {"api_name": "wx.DEFAULT_FRAME_STYLE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.EVT_CLOSE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 43, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 45, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 48, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 49, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.RadioBox", "line_number": 54, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.RA_SPECIFY_COLS", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 62, 
"usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 64, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 65, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 67, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 68, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 70, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 77, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 77, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 83, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 83, "usage_type": "attribute"}, {"api_name": "wx.BOTH", "line_number": 89, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 92, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 93, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "198442517", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Framework to Present results from pickle on\"\"\"\n\n__author__ = \"\"\"Rajesh Rao\"\"\"\n__email__ = 'rajeshmprao@gmail.com'\n__version__ = '0.1.0'\n\nimport pickle\nimport matplotlib.pyplot as plt\nfrom technical_indicators.utils import DateTimeAwareEncoder\nimport pyperclip\nimport json\nimport numpy as np\ndef draw_results(date, equity, cash, drawdown):\n fig, ax1 = plt.subplots()\n\n color = 'tab:blue'\n ax1.set_xlabel('Date')\n ax1.set_ylabel('Equity', color=color)\n ax1.plot(date, equity, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:green'\n ax2.set_ylabel('Drawdown', color=color) # we already handled the x-label with ax1\n ax2.plot(date, drawdown, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n color = 'tab:red'\n ax2.plot(date, cash, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.show()\n\ndef presentation():\n with open('results/2019-01-27/wi_cmf_15_200sma_sl.p', \"rb\") as f:\n data = pickle.load(f)\n \n date_list = data[\"date_list\"]\n equity_list = [x/100 for x in data[\"equity_list\"]]\n cash_list = data[\"cash_list\"]\n drawdown_list = data[\"drawdown_list\"]\n past_pos = data[\"past_pos\"] \n past_pos.sort(key = lambda x:x[\"PL\"]/(x[\"Buy Price\"] * x[\"Buy Quantity\"]), reverse = True)\n cur_pos = data[\"cur_pos\"]\n cur_pos.sort(key = lambda x:x[\"PL\"]/(x[\"Buy Price\"] * x[\"Buy 
Quantity\"]), reverse = True)\n\n past_pos_json = json.dumps(past_pos, cls=DateTimeAwareEncoder)\n pyperclip.copy(past_pos_json)\n input(past_pos_json)\n cur_pos_json = json.dumps(cur_pos, cls=DateTimeAwareEncoder)\n pyperclip.copy(cur_pos_json)\n input(cur_pos_json)\n\n\n from_date = date_list[0]\n to_date = date_list[-1]\n profit_pos = [x for x in past_pos if x[\"PL\"] >= 0] + [x for x in cur_pos if x[\"PL\"] >= 0]\n loss_pos = [x for x in past_pos if x[\"PL\"] < 0] + [x for x in cur_pos if x[\"PL\"] < 0]\n profit_pos_number = len(profit_pos)\n loss_pos_number = len(loss_pos)\n total_pos = profit_pos_number + loss_pos_number\n winloss_ratio = len(profit_pos)/ (len(profit_pos) + len(loss_pos))\n \n avg_profit = np.mean([x[\"PL\"]/(x[\"Buy Price\"] * x[\"Buy Quantity\"]) for x in profit_pos])\n avg_loss = np.mean([x[\"PL\"]/(x[\"Buy Price\"] * x[\"Buy Quantity\"]) for x in loss_pos])\n\n avg_profit_days = np.mean([(x[\"Sell Date\"] - x[\"Buy Date\"]).days for x in profit_pos])\n avg_loss_days = np.mean([(x[\"Sell Date\"] - x[\"Buy Date\"]).days for x in loss_pos])\n\n max_drawdown = max(drawdown_list)\n cur_drawdown = drawdown_list[-1]\n cur_cash_pos = cash_list[-1]\n\n cur_value = equity_list[-1]\n beginning_value = equity_list[0]\n\n print(\"Results:\")\n print(\"From Date = {0}\\nTo Date = {1}\\nTotal Positions = {2}\\nProfit Positions = {3}\\nLoss Positions = {4}\\nWinLoss Ratio = {5}\\nAverage Win = {6}\\nAverage Loss = {7}\\nAverage Win Days = {8}\\nAverage Loss Days = {9}\\nBeginning Equity = {10}\\nFinal Equity = {11}\\nMax Drawdown = {12}\\nCurrent Drawdown = {13}\\nCurrent Cash Percent = {14}\\n\".format(from_date, to_date, total_pos, profit_pos_number, loss_pos_number, winloss_ratio, avg_profit, avg_loss, avg_profit_days, avg_loss_days, beginning_value, cur_value, max_drawdown, cur_drawdown, cur_cash_pos))\n\n draw_results(date_list, equity_list, cash_list, drawdown_list)\n return\n\n\n", "sub_path": "technical_indicators/backtesting/presentation.py", "file_name": "presentation.py", "file_ext": "py", "file_size_in_byte": 3500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "technical_indicators.utils.DateTimeAwareEncoder", "line_number": 50, "usage_type": "name"}, {"api_name": "pyperclip.copy", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "technical_indicators.utils.DateTimeAwareEncoder", "line_number": 53, "usage_type": "name"}, {"api_name": "pyperclip.copy", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "621604597", "text": "from cryptography.hazmat.primitives import serialization \\\n as crypto_serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom 
cryptography.hazmat.backends import default_backend \\\n as crypto_default_backend\nfrom email.mime.text import MIMEText\nfrom cryptography.hazmat.backends import default_backend\n# from cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import ec\nfrom cryptography.hazmat.primitives import serialization\n\nimport random\nimport string\nimport smtplib\n\n\ndef gen_random_string(chars=32, lower=False):\n s = ''.join(\n random.choice(\n string.ascii_uppercase + string.ascii_lowercase + string.digits\n ) for _ in range(chars))\n if lower:\n return s.lower()\n return s\n\n\ndef send_email(recipients, subject, body):\n email_username = 'smtprelayuser@uvoo.io'\n\n msg = MIMEText(body)\n msg['Subject'] = subject\n msg['From'] = f'no-reply '\n msg['To'] = recipients\n msg = msg.as_string()\n\n session = smtplib.SMTP('smtp.uvoo.io', 587)\n session.ehlo()\n session.starttls()\n session.login(email_username, '$mtpRelayUser123@')\n session.sendmail(email_username, recipients, msg)\n session.quit()\n\n\ndef create_key_pair(key_cipher='rsa', key_format='openssh'):\n if key_cipher == 'rsa' and key_format == 'openssh':\n rsa_key = rsa.generate_private_key(\n backend=crypto_default_backend(),\n public_exponent=65537,\n key_size=4096\n )\n private_key = rsa_key.private_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PrivateFormat.PKCS8,\n crypto_serialization.NoEncryption())\n public_key = rsa_key.public_key().public_bytes(\n crypto_serialization.Encoding.OpenSSH,\n crypto_serialization.PublicFormat.OpenSSH\n )\n\n elif key_cipher == 'rsa' and key_format == 'pem':\n rsa_key = rsa.generate_private_key(\n backend=crypto_default_backend(),\n public_exponent=65537,\n key_size=4096\n )\n private_key = rsa_key.private_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PrivateFormat.PKCS8,\n crypto_serialization.NoEncryption())\n public_key = rsa_key.public_key().public_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PublicFormat.SubjectPublicKeyInfo\n )\n\n elif key_cipher == 'ec' and key_format == 'pem':\n # Ciphers: SECP384R1, SECP521R1\n ec_key = ec.generate_private_key(\n ec.SECP521R1(),\n default_backend()\n )\n private_key = ec_key.private_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PrivateFormat.PKCS8,\n crypto_serialization.NoEncryption())\n public_key = ec_key.public_key().public_bytes(\n crypto_serialization.Encoding.PEM,\n crypto_serialization.PublicFormat.SubjectPublicKeyInfo\n )\n else:\n s = f\"Unsupported key cipher {key_cipher} and/or format {key_format}.\"\n print(s)\n return -1\n\n return {'private_key': private_key.decode('utf-8'),\n 'public_key': public_key.decode('utf-8'),\n 'key_cipher': key_cipher,\n 'key_format': key_format}\n\n\ndef load_private_key(filename, file_format='pem'):\n with open(filename, 'rb') as pem_in:\n pemlines = pem_in.read()\n private_key = crypto_serialization.load_pem_private_key(\n pemlines, None, default_backend())\n return private_key\n\n\ndef load_key(filename, key_type='private', file_format='pem'):\n with open(filename, 'rb') as f:\n key_lines = f.read()\n\n if key_type == 'private':\n private_key = crypto_serialization.load_pem_private_key(\n key_lines, default_backend(), password=None)\n return private_key\n elif key_type == 'public':\n public_key = crypto_serialization.load_pem_public_key(\n key_lines, default_backend())\n return public_key\n else:\n raise Exception('E: Unsupported key type.')\n\n\ndef save_private_key(private_key, 
filename):\n pem = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n with open(filename, 'wb') as pem_out:\n pem_out.write(pem)\n\n\ndef save_public_key(public_key, filename):\n pem = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.OpenSSH\n )\n with open(filename, 'wb') as pem_out:\n pem_out.write(pem)\n", "sub_path": "portal/portal/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 20, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 20, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 20, "usage_type": "attribute"}, {"api_name": "email.mime.text.MIMEText", "line_number": 30, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 36, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key", "line_number": 46, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.rsa", "line_number": 46, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 47, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 52, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 53, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.NoEncryption", "line_number": 54, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 54, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 56, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 57, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key", "line_number": 61, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.rsa", "line_number": 61, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 62, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 67, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 68, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.NoEncryption", "line_number": 69, "usage_type": "call"}, {"api_name": 
"cryptography.hazmat.primitives.serialization", "line_number": 69, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 71, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 72, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.ec.generate_private_key", "line_number": 77, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.ec", "line_number": 77, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.ec.SECP521R1", "line_number": 78, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.ec", "line_number": 78, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 79, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 82, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 83, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.NoEncryption", "line_number": 84, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 84, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 86, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 87, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key", "line_number": 103, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 103, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 104, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key", "line_number": 113, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 113, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 114, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.load_pem_public_key", "line_number": 117, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 117, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 118, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 126, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 126, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat", "line_number": 127, "usage_type": "attribute"}, {"api_name": 
"cryptography.hazmat.primitives.serialization", "line_number": 127, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.NoEncryption", "line_number": 128, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 128, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 136, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 136, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PublicFormat", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 137, "usage_type": "name"}]} +{"seq_id": "636844377", "text": "from flask import Flask\nfrom flask import Flask, flash, redirect, render_template, request, session, abort\nimport os\n#https://pythonspot.com/login-authentication-with-flask/\napp = Flask(__name__)\n\ndef string2int(string):\n sum=0\n for i in string:\n sum+=ord(i)\n return(sum)\n \n\ndef controlPanel(ORGS,MACHS,SEED):\n f=open(\"control_panel.py\",\"w\")\n f.write(f\"\"\"ORGS={ORGS}\nMACHS={MACHS}\nseed={SEED}\"\"\")\n f.close()\n \n@app.route('/home',methods = ['POST', 'GET'])\ndef result():\n if request.method == 'POST':\n \n ORGS=int(request.form[\"org\"])\n Low=int(request.form[\"low\"])\n High=int(request.form[\"high\"])\n MACHS=[Low,High]\n SEED=string2int(request.form[\"seed\"])\n controlPanel(ORGS,MACHS,SEED)\n \n estime= ORGS*High/10\n result = {'Orgs': ORGS, 'Range of Machines':str(MACHS),'Random Seed':str(SEED),'Estimated Seconds to start':estime}\n return render_template(\"home.html\",result = result)\n \n@app.route('/')\ndef home():\n if not session.get('logged_in'):\n return (render_template('login.html'))\n else:\n return render_template('input.html')\n #return ('Hello Boss! 
Logout')\n \n@app.route('/login', methods=['POST'])\ndef do_admin_login():\n \n if (request.form['password'] == 'password' and request.form['username'] == 'admin'):\n session['logged_in'] = True\n print(\"jj\")\n else:\n flash('wrong password!')\n return home()\n \n@app.route(\"/logout\")\ndef logout():\n session['logged_in'] = False\n return home()\n\n\n\nif __name__ == \"__main__\":\n app.secret_key = os.urandom(4)\n app.run(debug=False,host='0.0.0.0', port=4000)", "sub_path": "backup_5_8/backup_5_8/dumps/control/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 56, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "17560925", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# MIT License\n#\n# Copyright (c) 2020 Louis Richard\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so.\n\n\"\"\"one_fluid_dispersion.py\n@author: Louis Richard\n\"\"\"\n\nimport numpy as np\nimport xarray as xr\n\nfrom scipy import constants, optimize\n\n\ndef _disprel(w, *args):\n k, theta = args[0:2]\n v_a, c_s = args[2:4]\n wc_e, wc_p = args[4:6]\n\n theta = np.deg2rad(theta)\n l_00 = 1\n l_01 = - w ** 2 / (k ** 2 * v_a ** 2)\n l_02 = - w ** 2 / (wc_e * wc_p)\n l_03 = k ** 2 * np.sin(theta) ** 2 / ((w / c_s) ** 2 - k ** 2)\n l_0_ = (l_00 + l_01 + l_02 + l_03)\n\n l_10 = np.cos(theta) ** 2\n l_11 = - w ** 2 / (k ** 2 * v_a ** 2)\n l_12 = - w ** 2 / (wc_e * wc_p)\n l_1_ = l_10 + l_11 + l_12\n\n r_0_ = w ** 2 * 
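The Flask login record gates routes on session['logged_in']. A minimal sketch of the same gate with two hardening tweaks that are my additions, not behavior the record requires: a secret key longer than the record's 4 bytes (Flask signs session cookies with it) and a timing-safe credential comparison:

```python
# Session-based login gate in the style of the record, hardened slightly.
import hmac
import os
from flask import Flask, request, session

app = Flask(__name__)
app.secret_key = os.urandom(24)  # the record used os.urandom(4), too short

@app.route("/login", methods=["POST"])
def login():
    ok = (hmac.compare_digest(request.form.get("username", ""), "admin")
          and hmac.compare_digest(request.form.get("password", ""), "password"))
    session["logged_in"] = ok
    return ("ok", 200) if ok else ("unauthorized", 401)

@app.route("/")
def home():
    return "welcome" if session.get("logged_in") else "please log in"
```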
np.cos(theta) ** 2 / wc_p ** 2\n\n disprel = l_0_ * l_1_ - r_0_\n\n return disprel\n\n\ndef one_fluid_dispersion(b_0, theta, ions, electrons, n_k: int = 100):\n r\"\"\"Solves the one fluid dispersion relation.\n\n Parameters\n ----------\n b_0 : float\n Magnetic field\n\n theta : float\n The angle of propagation of the wave with respect to the magnetic\n field, :math:`\\cos^{-1}(k_z / k)`\n\n ions : dict\n Hash table with n : number density, t: temperature, gamma:\n polytropic index.\n\n electrons : dict\n Hash table with n : number density, t: temperature, gamma:\n polytropic index.\n\n n_k : int, optional\n Number of wavenumbers.\n\n Returns\n -------\n wc_1 : xarray.DataArray\n 1st root\n\n wc_2 : xarray.DataArray\n 2nd root\n\n wc_3 : xarray.DataArray\n 3rd root\n\n \"\"\"\n\n keys = [\"n\", \"t\", \"gamma\"]\n n_p, t_p, gamma_p = [ions[k] for k in keys]\n n_e, t_e, gamma_e = [electrons[k] for k in keys]\n\n q_e = constants.elementary_charge\n m_e = constants.electron_mass\n m_p = constants.proton_mass\n ep_0 = constants.epsilon_0\n mu_0 = constants.mu_0\n\n wc_e = q_e * b_0 / m_e\n wc_p = q_e * b_0 / m_p\n\n wp_e = np.sqrt(q_e ** 2 * n_e / (ep_0 * m_e))\n wp_p = np.sqrt(q_e ** 2 * n_p / (ep_0 * m_p))\n\n v_p = np.sqrt(q_e * t_p / m_p)\n v_e = np.sqrt(q_e * t_e / m_e)\n\n v_a = b_0 / np.sqrt(mu_0 * n_p * m_p)\n c_s = np.sqrt((gamma_e * q_e * t_e + gamma_p * q_e * t_p) / (m_e + m_p))\n\n k_vec = np.linspace(2e-7, 1.0e-4, n_k)\n\n wc_1, wc_2, wc_3 = [np.zeros(len(k_vec)) for _ in range(3)]\n\n for i, k in enumerate(k_vec):\n if i < 10:\n guess_w1 = v_a * k * 1.50\n guess_w2 = v_a * k * 0.70\n guess_w3 = c_s * k * 0.99\n else:\n guess_w1 = wc_1[i - 1] + (wc_1[i - 1] - wc_1[i - 2])\n guess_w2 = wc_2[i - 1] + (wc_2[i - 1] - wc_2[i - 2])\n guess_w3 = wc_3[i - 1] + (wc_3[i - 1] - wc_3[i - 2])\n\n arguments = (k, theta, v_a, c_s, wc_e, wc_p)\n wc_1[i] = optimize.fsolve(_disprel, guess_w1, args=arguments)[0]\n wc_2[i] = optimize.fsolve(_disprel, guess_w2, args=arguments)[0]\n wc_3[i] = optimize.fsolve(_disprel, guess_w3, args=arguments)[0]\n\n attrs = {\"wc_e\": wc_e, \"wc_p\": wc_p, \"wp_e\": wp_e, \"wp_p\": wp_p,\n \"v_p\": v_p, \"v_e\": v_e, \"v_a\": v_a, \"c_s\": c_s}\n\n wc_1 = xr.DataArray(wc_1, coords=[k_vec], dims=[\"k\"], attrs=attrs)\n wc_2 = xr.DataArray(wc_2, coords=[k_vec], dims=[\"k\"], attrs=attrs)\n wc_3 = xr.DataArray(wc_3, coords=[k_vec], dims=[\"k\"], attrs=attrs)\n\n return wc_1, wc_2, wc_3\n", "sub_path": "pyrfu/dispersion/one_fluid_dispersion.py", "file_name": "one_fluid_dispersion.py", "file_ext": "py", "file_size_in_byte": 3774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.deg2rad", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.constants.elementary_charge", "line_number": 89, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 89, "usage_type": "name"}, {"api_name": "scipy.constants.electron_mass", "line_number": 90, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 90, "usage_type": "name"}, {"api_name": "scipy.constants.proton_mass", "line_number": 91, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 91, "usage_type": "name"}, {"api_name": "scipy.constants.epsilon_0", "line_number": 92, 
"usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 92, "usage_type": "name"}, {"api_name": "scipy.constants.mu_0", "line_number": 93, "usage_type": "attribute"}, {"api_name": "scipy.constants", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 122, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 122, "usage_type": "name"}, {"api_name": "scipy.optimize.fsolve", "line_number": 123, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 123, "usage_type": "name"}, {"api_name": "scipy.optimize.fsolve", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 124, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 129, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 130, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "441800660", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 基本ライブラリ\nimport pandas as pd\nimport pandas.io.sql as psql\nimport numpy as np\nimport numpy.random as rd\nimport gc\nimport multiprocessing as mp\nimport os\nimport sys\nimport pickle\nfrom collections import defaultdict\nfrom glob import glob\nimport math\nfrom datetime import datetime as dt\nfrom pathlib import Path\nimport scipy.stats as st\nimport re\nimport shutil\nfrom tqdm import tqdm_notebook as tqdm\nimport datetime\nts_conv = np.vectorize(datetime.datetime.fromtimestamp) # 秒ut(10桁) ⇒ 日付\n\n# グラフ描画系\nimport matplotlib\nfrom matplotlib import font_manager\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib import rc\n\nfrom matplotlib import animation as ani\nfrom IPython.display import Image\n\nplt.rcParams[\"patch.force_edgecolor\"] = True\n#rc('text', usetex=True)\nfrom IPython.display import display # Allows the use of display() for DataFrames\nimport seaborn as sns\nsns.set(style=\"whitegrid\", palette=\"muted\", color_codes=True)\nsns.set_style(\"whitegrid\", {'grid.linestyle': '--'})\nred = sns.xkcd_rgb[\"light red\"]\ngreen = sns.xkcd_rgb[\"medium green\"]\nblue = sns.xkcd_rgb[\"denim blue\"]\n\n#カラム内の文字数。デフォルトは50\npd.set_option(\"display.max_colwidth\", 100)\n\n#行数\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\n#\npd.options.display.float_format = '{:,.5f}'.format\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n\nimport lightgbm as lgb\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import mean_absolute_error\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold, KFold, RepeatedKFold\n\nfrom sklearn import metrics\nimport json\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nsys.path.append('..')\nfrom lib.line_notif import send_message\nfrom lib.utils import reduce_mem_usage, current_time, unpickle, to_pickle\nfrom 
lib.utils import one_hot_encoder, apply_agg, multi_combine_categorical_feature\nfrom lib.utils import import_data, get_split_indexer \n\nclass ModelExtractionCallback(object):\n \"\"\"Callback class for retrieving trained model from lightgbm.cv()\n NOTE: This class depends on '_CVBooster' which is hidden class, so it might doesn't work if the specification is changed.\n \"\"\"\n\n def __init__(self):\n self._model = None\n\n def __call__(self, env):\n # Saving _CVBooster object.\n self._model = env.model\n\n def _assert_called_cb(self):\n if self._model is None:\n # Throw exception if the callback class is not called.\n raise RuntimeError('callback has not called yet')\n\n @property\n def boosters_proxy(self):\n self._assert_called_cb()\n # return Booster object\n return self._model\n\n @property\n def raw_boosters(self):\n self._assert_called_cb()\n # return list of Booster\n return self._model.boosters\n\n @property\n def best_iteration(self):\n self._assert_called_cb()\n # return boosting round when early stopping.\n return self._model.best_iteration\n\n\n# In[3]:\n\n\nDATA_VERSION = \"v001\"\nTRIAL_NO = \"001\"\nsave_path = Path(f\"../processed/{DATA_VERSION}\")\nsave_path.mkdir(parents=True, exist_ok=True)\nmodel_path = Path(f\"../model/{DATA_VERSION}_{TRIAL_NO}\")\nmodel_path.mkdir(parents=True, exist_ok=True)\nsubmit_path = Path(f\"../submit/{DATA_VERSION}_{TRIAL_NO}\")\nsubmit_path.mkdir(parents=True, exist_ok=True)\n\n\nprint(\"start loading...\")\ntrain = unpickle(save_path/\"train_002.df.pkl\", )\nprint(\"train loaded.\")\ntest = unpickle(save_path/\"test_002.df.pkl\", )\nprint(\"test loaded.\")\ny = train[\"scalar_coupling_constant\"]\ntrain.drop(\"scalar_coupling_constant\", axis=1, inplace=True)\n\ntrain.set_index(\"id\", inplace=True)\ntest.set_index(\"id\", inplace=True)\n\nprint(train.shape, test.shape, y.shape)\n\ncategorical = ['atom_index_0', 'atom_index_1', 'atom_1', 'atom_0', 'type_0', 'type']\nlgbm_params = {\n \"boosting_type\": \"gbdt\",\n 'objective': 'regression',\n \"metric\": 'mae',\n # 'n_estimator': 10000,\n 'n_jobs': -1,\n \"seed\": 71,\n \"verbosity\": -1,\n \n 'learning_rate': 0.1,\n \n 'max_depth': 9,\n 'num_leaves': 128,\n \n \"subsample_freq\": 1,\n \"subsample\": 0.8,\n 'colsample_bytree': 0.8,\n \n 'min_child_samples': 100,\n 'reg_alpha': 0.1,\n 'reg_lambda': 0.3,\n}\n\nlgb_train = lgb.Dataset(train, y)\n\n# Training settings\nFOLD_NUM = 5\nfold_seed = 71\nfolds = KFold(n_splits=FOLD_NUM, shuffle=True, random_state=fold_seed)\n\nextraction_cb = ModelExtractionCallback()\ncallbacks = [\n ModelExtractionCallback()\n]\n\n# Fitting\nprint(\"start fitting...\")\nret = lgb.cv(params=lgbm_params,\n train_set=lgb_train,\n categorical_feature=categorical,\n folds=folds,\n num_boost_round=30000,\n verbose_eval = 500,\n early_stopping_rounds=200,\n callbacks=callbacks,\n )\ndf_ret = pd.DataFrame(ret)\ndisplay(df_ret)\nprint(\"finish fitting.\")\n\n# Retrieving booster and training information.\nproxy = extraction_cb.boosters_proxy\nboosters = extraction_cb.raw_boosters\nbest_iteration = extraction_cb.best_iteration\nto_pickle(model_path/'extraction_cb.pkl', extraction_cb)\n\n# Create oof prediction result\nprint(\"create oof preds.\")\nfold_iter = folds.split(train, y)\noof_preds = np.zeros_like(y)\nfor n_fold, ((trn_idx, val_idx), booster) in enumerate(zip(fold_iter, boosters)):\n print(val_idx)\n valid = train.iloc[val_idx]\n oof_preds[val_idx] = booster.predict(valid, num_iteration=best_iteration)\nprint(f\"mae on oof preds: {mean_absolute_error(y, 
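One thing worth flagging in the training script above: `extraction_cb = ModelExtractionCallback()` is created, but a *second*, separate instance is placed in the `callbacks` list passed to `lgb.cv()`, so the instance later queried via `boosters_proxy` is never invoked and its `_assert_called_cb` check would raise. A minimal self-contained sketch of the intended wiring, using a compressed version of the same callback and a synthetic dataset (`make_regression` here is just a stand-in):

```python
import lightgbm as lgb
from sklearn.datasets import make_regression
from sklearn.model_selection import KFold

class ModelExtractionCallback:
    """Compressed version of the callback above: stash lgb.cv's CVBooster."""
    def __init__(self):
        self._model = None

    def __call__(self, env):
        self._model = env.model  # called once per boosting round

    @property
    def raw_boosters(self):
        return self._model.boosters  # one Booster per fold

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
extraction_cb = ModelExtractionCallback()

lgb.cv(params={'objective': 'regression', 'metric': 'mae', 'verbosity': -1},
       train_set=lgb.Dataset(X, y),
       folds=KFold(n_splits=3, shuffle=True, random_state=0),
       num_boost_round=20,
       callbacks=[extraction_cb])  # pass the same instance that is queried below

print(len(extraction_cb.raw_boosters))  # 3
```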
oof_preds)}\")\nnp.save(submit_path/'oof.npy', oof_preds)\n\n# Averaging prediction result for test data.\ny_pred_proba_list = proxy.predict(test, num_iteration=best_iteration)\ny_pred_proba_avg = np.array(y_pred_proba_list).mean(axis=0)\n\nsub = pd.read_csv('../input/sample_submission.csv')\nsub['scalar_coupling_constant'] = y_pred_proba_avg\nsub.to_csv(submit_path/'submission.csv', index=False)\nsub.head()\n\nprint(\"finish.\")", "sub_path": "src/backup/train_v001_001.py", "file_name": "train_v001_001.py", "file_ext": "py", "file_size_in_byte": 6082, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.vectorize", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 43, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 44, "usage_type": "call"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 45, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 46, "usage_type": "attribute"}, {"api_name": "seaborn.xkcd_rgb", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pandas.set_option", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.options", "line_number": 56, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 125, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 127, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 129, "usage_type": "call"}, {"api_name": "lib.utils.unpickle", "line_number": 134, "usage_type": "call"}, {"api_name": "lib.utils.unpickle", "line_number": 136, "usage_type": "call"}, {"api_name": "lightgbm.Dataset", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 175, "usage_type": "call"}, {"api_name": "lightgbm.cv", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 193, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 194, "usage_type": "call"}, {"api_name": "lib.utils.to_pickle", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 206, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 216, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "375213268", "text": "\"\"\"Module responsible for taking given feature and label files and\nsplitting into respective train/test sets. 
These sets are written to disk.\"\"\"\n\nimport os, sys, json, logging, argparse, bz2\nimport cPickle as pickle\nfrom collections import Counter\nfrom numpy import mean, std, min, max\n\nroot_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(root_dir)\n\nfrom util.DataStreamer import DataStreamer\nfrom util.common import load_sparse_csr, save_sparse_csr\n\nlogging.basicConfig(level=logging.INFO)\n\nparser = argparse.ArgumentParser(description='splits bz2 Train.csv into train/dev/test with almost even label distribution')\nparser.add_argument('TrainBZ2', help=\"the dataset file\")\nparser.add_argument('labels', help=\"the json dictionary containing the labels to look for\")\nparser.add_argument('out_file', help=\"the prefix for the files in which to store the subsampled examples\")\n\nparser.add_argument('--top_n_labels', type=int, help=\"the top n labels to use. Default is 50\", default=50)\nparser.add_argument('--min_count', type=int, help='the minimum number of examples per label. Default is 100', default=100)\nparser.add_argument('--test_fraction', type=float, help='portion of data to use for test set. Default is 0.15', default=0.15)\nparser.add_argument('--val_fraction', type=float, help='portion of data to use for validation set. Default is 0.15', default=0.15)\nargs = parser.parse_args()\n\n\nwith open(args.labels) as f:\n label_counts = pickle.load(f)\n\nlabel_counts = label_counts.most_common(args.top_n_labels)\nmost_common_tags = set([t[0] for t in label_counts])\nsmallest_count = label_counts[-1][1]\n\nmin_num_val = int(min([smallest_count, args.min_count]) * args.val_fraction)\nmin_num_test = int(min([smallest_count, args.min_count]) * args.test_fraction)\n\nval_indices = set()\ntest_indices = set()\n\ninitial_counts = dict(zip(list(most_common_tags), [0] * len(most_common_tags)))\ntrain_counts = Counter(initial_counts)\nval_counts = Counter(initial_counts)\ntest_counts = Counter(initial_counts)\n\ntrain_out = bz2.BZ2File(args.out_file + '.train.bz2', 'wb', compresslevel=9)\nval_out = bz2.BZ2File(args.out_file + '.val.bz2', 'wb', compresslevel=9)\ntest_out = bz2.BZ2File(args.out_file + '.test.bz2', 'wb', compresslevel=9)\n\ni = 0\nj_train = 0\nj_val = 0\nj_test = 0\n\ntrain_indices = []\nval_indices = []\ntest_indices = []\n\nfor example in DataStreamer.load_from_file(args.TrainBZ2):\n\n if i%10000 == 0:\n logging.info('processed %s examples, dumped %s train, %s val, %s test, %s total' % (i, j_train, j_val, j_test, j_train+j_val+j_test))\n\n tags = example.data['tags']\n matching = set(tags).intersection(most_common_tags)\n\n need_more_train_examples = [c for c in train_counts.values() if c < args.min_count]\n can_use_this_for_train = len([c for c in matching if train_counts[c] > args.min_count]) < 2\n need_more_val_examples = [c for c in val_counts.values() if c < min_num_val]\n can_use_this_for_val = len([c for c in matching if val_counts[c] > min_num_val]) < 2\n need_more_test_examples = [c for c in test_counts.values() if c < min_num_test]\n can_use_this_for_test = len([c for c in matching if test_counts[c] > min_num_test]) < 2\n\n if not (need_more_train_examples or need_more_val_examples or need_more_test_examples):\n break\n\n if need_more_train_examples and can_use_this_for_train:\n train_counts.update(matching)\n train_out.write(example.to_json() + \"\\n\")\n train_indices += [i]\n j_train += 1\n\n elif need_more_val_examples and can_use_this_for_val:\n val_counts.update(matching)\n val_out.write(example.to_json() + \"\\n\")\n 
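The streaming split above keeps label distributions roughly even with quota counters: a split keeps accepting examples while any label is under quota, and an example is accepted only if it would push at most one already-full label further over. A toy sketch of that acceptance rule, with hypothetical tags and a quota of 2:

```python
from collections import Counter

QUOTA = 2
counts = Counter({'python': 0, 'java': 0})

def needs_more():
    # Mirrors the "need_more_*_examples" checks above.
    return any(c < QUOTA for c in counts.values())

def usable(tags):
    # Mirrors "can_use_this_for_*": at most one matched label already over quota.
    return len([t for t in tags if counts[t] > QUOTA]) < 2

for tags in [{'python'}, {'python', 'java'}, {'java'}, {'python'}]:
    if needs_more() and usable(tags):
        counts.update(tags)

print(counts)  # Counter({'python': 2, 'java': 2}); the last example is skipped
```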
val_indices += [i]\n j_val += 1\n\n elif need_more_test_examples and can_use_this_for_test:\n test_counts.update(matching)\n test_out.write(example.to_json() + \"\\n\")\n test_indices += [i]\n j_test += 1\n i += 1\n\nlogging.info('processed %s examples, dumped %s train, %s val, %s test, %s total' % (i, j_train, j_val, j_test, j_train+j_val+j_test))\ntrain_out.close()\nval_out.close()\ntest_out.close()\n\ndef stats_str(counter, name):\n return \"label distribution for %s:\\n mean: %s, std: %s, min: %s, max: %s\" % (name, mean(counter.values()), std(counter.values()), min(counter.values()), max(counter.values()))\n\nlogging.info(stats_str(train_counts, 'train'))\nlogging.info(stats_str(val_counts, 'val'))\nlogging.info(stats_str(test_counts, 'test'))\n\nmapping = (train_counts + val_counts + test_counts).most_common(args.top_n_labels)\nmapping = [t[0] for t in mapping]\nwith open(args.out_file + '.labels.counts.json', 'wb') as f:\n json.dump({'train': train_counts,\n 'val': val_counts,\n 'test': test_counts,\n 'total': train_counts + val_counts + test_counts,\n 'mapping': mapping}, f)\n\n\n", "sub_path": "util/split_data.py", "file_name": "split_data.py", "file_ext": "py", "file_size_in_byte": 4798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 43, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 44, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 45, "usage_type": "call"}, {"api_name": "bz2.BZ2File", "line_number": 47, "usage_type": "call"}, {"api_name": "bz2.BZ2File", "line_number": 48, "usage_type": "call"}, {"api_name": "bz2.BZ2File", "line_number": 49, "usage_type": "call"}, {"api_name": "util.DataStreamer.DataStreamer.load_from_file", "line_number": 60, "usage_type": "call"}, {"api_name": "util.DataStreamer.DataStreamer", "line_number": 60, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 103, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 107, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "45950779", "text": "import pytest\n\nfrom async_v20.definitions 
import types\nfrom async_v20.definitions.base import Model\nfrom async_v20.definitions.base import create_attribute\nfrom tests.test_definitions.helpers import get_valid_primitive_data, create_cls_annotations\n\nmodel_classes = (cls for cls in (getattr(types, typ) for typ in types.__all__) if\n issubclass(cls, Model))\n\nmodel_classes_data = [(cls, get_valid_primitive_data(cls)) for cls in model_classes]\n\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_class_annotations_match_the_parents_class_annotations(cls, data):\n if not cls.__bases__[0] == Model:\n print(cls.__bases__[0].__new__.__annotations__)\n print(cls.__new__.__annotations__)\n\n for annotation in cls.__new__.__annotations__:\n assert annotation in cls.__bases__[0].__new__.__annotations__\n\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_all_types_can_be_instantiated_from_dict(cls, data):\n arguments = data\n result = cls(**arguments)\n assert result\n assert type(result) == cls\n # Test class instance can be used to create another instance of the same class\n result = cls(**result.dict())\n assert result\n assert type(result) == cls\n\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_all_types_instantiated_from_dict_with_incorrect_argument_raises_error(cls, data):\n arguments = data.copy()\n arguments.update(this_argument_doesnt_exist='TEST_VALUE')\n with pytest.raises(ValueError):\n cls(**arguments)\n\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_all_types_can_be_instantiated_from_tuple(cls, data):\n arguments = tuple(data.values())\n # make sure the arguments are in the correct order\n result = cls(*arguments)\n result_json = result.json()\n assert result\n assert type(result) == cls\n\n for index, argument in enumerate(arguments):\n if isinstance(argument, dict):\n args = list(arguments)\n args[index] = tuple(argument.values())\n assert cls(*args).json() == result_json\n\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_all_types_can_be_instantiated_from_annotation(cls, data):\n arguments = {k: create_attribute(create_cls_annotations(cls)[k], v)\n for k, v in data.items()}\n print(arguments)\n assert cls(**arguments)\n\n@pytest.mark.parametrize('cls, data', model_classes_data)\ndef test_all_derived_types_have_same_arguments_and_annotations_as_parent(cls, data):\n parent_class = cls.__bases__[0]\n if not parent_class == Model:\n parent_class_parameters = parent_class.__new__.__signature__.parameters\n for name, parameter in cls.__new__.__signature__.parameters.items():\n assert name in parent_class_parameters\n assert issubclass(parameter.annotation,parent_class_parameters[name].annotation)\n", "sub_path": "tests/test_definitions/test_types.py", "file_name": "test_types.py", "file_ext": "py", "file_size_in_byte": 2906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "async_v20.definitions.types", "line_number": 8, "usage_type": "argument"}, {"api_name": "async_v20.definitions.types.__all__", "line_number": 8, "usage_type": "attribute"}, {"api_name": "async_v20.definitions.base.Model", "line_number": 9, "usage_type": "argument"}, {"api_name": "tests.test_definitions.helpers.get_valid_primitive_data", "line_number": 11, "usage_type": "call"}, {"api_name": "async_v20.definitions.base.Model", "line_number": 16, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark", 
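The test module above builds its `(cls, data)` pairs once at import time and reuses them across several `pytest.mark.parametrize` tests, including a dict round-trip check. A minimal sketch of that pattern; `DummyModel` and its data are hypothetical stand-ins for the generated model classes:

```python
import pytest

class DummyModel:
    def __init__(self, a=1):
        self.a = a

    def dict(self):
        return {'a': self.a}

cases = [(DummyModel, {'a': 2})]  # built once, shared by every test

@pytest.mark.parametrize('cls, data', cases)
def test_round_trip(cls, data):
    first = cls(**data)
    second = cls(**first.dict())  # re-instantiation from the dict form
    assert type(second) is cls
```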
"line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 40, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 44, "usage_type": "attribute"}, {"api_name": "async_v20.definitions.base.create_attribute", "line_number": 62, "usage_type": "call"}, {"api_name": "tests.test_definitions.helpers.create_cls_annotations", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 60, "usage_type": "attribute"}, {"api_name": "async_v20.definitions.base.Model", "line_number": 70, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 67, "usage_type": "attribute"}]} +{"seq_id": "552837836", "text": "\"\"\"The Game of Hog.\"\"\"\n\nfrom dice import four_sided, six_sided, make_test_dice\nfrom ucb import main, trace, log_current_line, interact\nfrom math import sqrt, ceil\n\nGOAL_SCORE = 100 # The goal of Hog is to score 100 points.\n\n\n######################\n# Phase 1: Simulator #\n######################\n\n\ndef roll_dice(num_rolls, dice=six_sided):\n \"\"\"Simulate rolling the DICE exactly NUM_ROLLS times. Return the sum of\n the outcomes unless any of the outcomes is 1. In that case, return 0.\n \"\"\"\n # These assert statements ensure that num_rolls is a positive integer.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n # BEGIN Question 1\n\n dice_rolls = []\n\n for i in range(num_rolls):\n dice_rolls.append(dice())\n\n return 0 if 1 in dice_rolls else sum(dice_rolls)\n # END Question 1\n\n\ndef take_turn(num_rolls, opponent_score, dice=six_sided):\n \"\"\"Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free bacon).\n\n num_rolls: The number of dice rolls that will be made.\n opponent_score: The total score of the opponent.\n dice: A function of no args that returns an integer outcome.\n \"\"\"\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls >= 0, 'Cannot roll a negative number of dice.'\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\n assert opponent_score < 100, 'The game should be over.'\n\n def is_prime(n):\n # make boolean array of divisibility and check if True is inside\n return False if n <= 1 else not (True in [n % i == 0 for i in range(n-1, ceil(sqrt(n)-1), -1)])\n\n def next_prime(n):\n k = n+1\n while True:\n if is_prime(k):\n return k\n else:\n k += 1\n\n score = 0\n\n # BEGIN Question 2\n if num_rolls == 0:\n # change integer into sorted digit array\n digit_array = sorted([int(digit_char) for digit_char in str(opponent_score)])\n score = digit_array[-1] + 1\n else:\n score = roll_dice(num_rolls, dice)\n\n if is_prime(score):\n return next_prime(score)\n\n return score\n\n # END Question 2\n\n\ndef select_dice(score, opponent_score):\n \"\"\"Select six-sided dice unless the sum of SCORE and OPPONENT_SCORE is a\n multiple of 7, in which case select four-sided dice (Hog wild).\n \"\"\"\n # BEGIN Question 3\n return four_sided 
if (score + opponent_score) % 7 == 0 else six_sided\n # END Question 3\n\n\ndef is_swap(score0, score1):\n \"\"\"Returns whether the last two digits of SCORE0 and SCORE1 are reversed\n versions of each other, such as 19 and 91.\n \"\"\"\n # BEGIN Question 4\n s0, s1 = str(score0)[-2:], str(score1)[-2:][::-1]\n return s0 == s1 or s0 + '0' == s1 or s1 + '0' == s0 or '0' + s0 == s1 or '0' + s1 == s0\n # END Question 4\n\n\ndef other(who):\n \"\"\"Return the other player, for a player WHO numbered 0 or 1.\n\n >>> other(0)\n 1\n >>> other(1)\n 0\n \"\"\"\n return 1 - who\n\n\ndef play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):\n \"\"\"Simulate a game and return the final scores of both players, with\n Player 0's score first, and Player 1's score second.\n\n A strategy is a function that takes two total scores as arguments\n (the current player's score, and the opponent's score), and returns a\n number of dice that the current player will roll this turn.\n\n strategy0: The strategy function for Player 0, who plays first\n strategy1: The strategy function for Player 1, who plays second\n score0 : The starting score for Player 0\n score1 : The starting score for Player 1\n \"\"\"\n who = 0 # Which player is about to take a turn, 0 (first) or 1 (second)\n # BEGIN Question 5\n\n scores = [score0, score1]\n\n while True:\n if who == 0:\n num_rolls, dice = strategy0(scores[who], scores[other(who)]), select_dice(scores[who], scores[other(who)])\n else:\n num_rolls, dice = strategy1(scores[who], scores[other(who)]), select_dice(scores[who], scores[other(who)])\n turn_score = take_turn(num_rolls, scores[other(who)], dice)\n\n if turn_score == 0:\n scores[other(who)] += num_rolls\n\n scores[who] += turn_score\n\n if is_swap(scores[who], scores[other(who)]):\n scores[who], scores[other(who)] = scores[other(who)], scores[who]\n if scores[who] >= goal or scores[other(who)] >= goal:\n break\n\n who = other(who)\n\n # END Question 5\n return scores[0], scores[1]\n\n\n#######################\n# Phase 2: Strategies #\n#######################\n\n\ndef always_roll(n):\n \"\"\"Return a strategy that always rolls N dice.\n\n A strategy is a function that takes two total scores as arguments\n (the current player's score, and the opponent's score), and returns a\n number of dice that the current player will roll this turn.\n\n >>> strategy = always_roll(5)\n >>> strategy(0, 0)\n 5\n >>> strategy(99, 99)\n 5\n \"\"\"\n def strategy(score, opponent_score):\n return n\n\n return strategy\n\n# Experiments\ndef make_averaged(fn, num_samples=1000):\n \"\"\"Return a function that returns the average_value of FN when called.\n\n To implement this function, you will have to use *args syntax, a new Python\n feature introduced in this project. 
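A minimal standalone version of the `make_averaged` helper whose docstring appears above, showing how `*args` lets the returned closure forward any call signature; the deterministic lambda makes the average easy to verify:

```python
def make_averaged(fn, num_samples=1000):
    def averaged(*args):
        # Forward whatever arguments the caller supplies to fn.
        return sum(fn(*args) for _ in range(num_samples)) / num_samples
    return averaged

averaged_add = make_averaged(lambda a, b: a + b, num_samples=10)
print(averaged_add(2, 3))  # deterministic fn, so the average is exactly 5.0
```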
See the project description.\n\n >>> dice = make_test_dice(3, 1, 5, 6)\n >>> averaged_dice = make_averaged(dice, 1000)\n >>> averaged_dice()\n 3.75\n >>> make_averaged(roll_dice, 1000)(2, dice)\n 5.5\n\n In this last example, two different turn scenarios are averaged.\n - In the first, the player rolls a 3 then a 1, receiving a score of 0.\n - In the other, the player rolls a 5 and 6, scoring 11.\n Thus, the average value is 5.5.\n Note that the last example uses roll_dice so the hogtimus prime rule does\n not apply.\n \"\"\"\n # BEGIN Question 6\n def average(*args):\n return sum([fn(*args) for i in range(num_samples)]) / num_samples\n return average\n # END Question 6\n\n\ndef max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n \"\"\"Return the number of dice (1 to 10) that gives the highest average turn\n score by calling roll_dice with the provided DICE over NUM_SAMPLES times.\n Assume that dice always return positive outcomes.\n\n >>> dice = make_test_dice(3)\n >>> max_scoring_num_rolls(dice)\n 10\n \"\"\"\n # BEGIN Question 7\n ave = make_averaged(roll_dice, num_samples)\n averages = [ave(i, dice) for i in range(1, 11)]\n return averages.index(max(averages)) + 1\n\n # END Question 7\n\n\ndef winner(strategy0, strategy1):\n \"\"\"Return 0 if strategy0 wins against strategy1, and 1 otherwise.\"\"\"\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1\n\n\ndef ways_to_roll_at_least(k, n):\n if k <= 0:\n return pow(5, n)\n elif n == 0:\n return 0\n else:\n total, d = 0, 2\n while d <= 6:\n total = total + ways_to_roll_at_least(k - d, n - 1)\n d = d + 1\n return total\n\ndef chance_to_roll(k, n):\n return (ways_to_roll_at_least(k, n) / pow(6, n)) * 100\n\ndef print_arr(limit):\n arr = []\n for i in range(1,limit+1):\n test_arr = [float('{:.5f}'.format(chance_to_roll(i, j))) for j in range(1, 11)]\n arr.append(test_arr)\n print(arr)\n\ndef average_win_rate(strategy, baseline=always_roll(5)):\n \"\"\"Return the average win rate of STRATEGY against BASELINE. 
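The recursive `ways_to_roll_at_least(k, n)` above counts sequences of n dice drawn from {2..6} (a rolled 1 would score zero, so it is excluded) whose total is at least k; once k is met, the remaining dice are unconstrained within {2..6}, contributing `5**n` sequences. Note that `chance_to_roll` divides by `6**n`, so sequences containing a 1 implicitly count as failures. A brute-force cross-check of the counter:

```python
from itertools import product

def ways_to_roll_at_least(k, n):
    if k <= 0:
        return 5 ** n  # remaining dice are free to be anything in {2..6}
    if n == 0:
        return 0
    return sum(ways_to_roll_at_least(k - d, n - 1) for d in range(2, 7))

def brute_force(k, n):
    return sum(1 for roll in product(range(2, 7), repeat=n) if sum(roll) >= k)

# 15 of the 25 possible 1-free sequences of two dice total at least 8.
assert ways_to_roll_at_least(8, 2) == brute_force(8, 2) == 15
```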
Averages the\n winrate when starting the game as player 0 and as player 1.\n \"\"\"\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2\n\n\ndef run_experiments():\n \"\"\"Run a series of strategy experiments and report results.\"\"\"\n if True: # Change to False when done finding max_scoring_num_rolls\n six_sided_max = max_scoring_num_rolls(six_sided)\n print('Max scoring num rolls for six-sided dice:', six_sided_max)\n four_sided_max = max_scoring_num_rolls(four_sided)\n print('Max scoring num rolls for four-sided dice:', four_sided_max)\n\n if False: # Change to True to test always_roll(8)\n print('always_roll(8) win rate:', average_win_rate(always_roll(8)))\n\n if False: # Change to True to test bacon_strategy\n print('bacon_strategy win rate:', average_win_rate(bacon_strategy))\n\n if False: # Change to True to test swap_strategy\n print('swap_strategy win rate:', average_win_rate(swap_strategy))\n\n \"*** You may add additional experiments as you wish ***\"\n\n\n# Strategies\n\ndef bacon_strategy(score, opponent_score, margin=8, num_rolls=5):\n \"\"\"This strategy rolls 0 dice if that gives at least MARGIN points,\n and rolls NUM_ROLLS otherwise.\n \"\"\"\n # BEGIN Question 8\n \"*** REPLACE THIS LINE ***\"\n return 0 if take_turn(0, opponent_score) >= margin else num_rolls\n # END Question 8\n\n\ndef swap_strategy(score, opponent_score, num_rolls=5):\n \"\"\"This strategy rolls 0 dice when it results in a beneficial swap and\n rolls NUM_ROLLS otherwise.\n \"\"\"\n # BEGIN Question 9\n return 0 if is_swap(score + take_turn(0, opponent_score), opponent_score) and score + take_turn(0, opponent_score) < opponent_score else num_rolls\n # END Question 9\n\n\ndef bacon_seven(score, o_score):\n if (take_turn(0, o_score) + score + o_score) % 7 == 0:\n return True\n return False\n\ndef final_strategy(score, opponent_score):\n \"\"\"Write a brief description of your final strategy.\n\n At the very start of game hog wild applies so returns 4\n If there is hog wild on current turn return 0\n If free bacon strategy yeilds to a hog wild return 0\n When ahead by 24 or more try bacon strategy with low margin or return 4\n When losing by 25 or more try bacon strategy with high margin or return 6\n Otherwise try bacon strategy with a margin of 6\n If within 12 of 100 return 3 and if winning return 4\n Default is to return 6\n\n \"\"\"\n # BEGIN Question 10\n\n if (score == 0 and opponent_score == 0):\n return 4\n if (score + opponent_score) % 7 == 0:\n return 0\n if (bacon_seven(score, opponent_score)):\n return 0\n\n if(score - opponent_score >= 24):\n if(bacon_strategy(score, opponent_score, margin=3) == 0):\n return 0\n else:\n if(score - opponent_score >= 40):\n return 2\n return 4\n elif(opponent_score - score >= 25):\n if(bacon_strategy(score, opponent_score, margin=5) == 0):\n return 0\n else:\n return 6\n\n if(score - opponent_score >= 10):\n if(bacon_strategy(score, opponent_score, margin=2) == 0):\n return 0\n else:\n return 2\n elif(opponent_score - score >= 25):\n if(bacon_strategy(score, opponent_score, margin=8) == 0):\n return 0\n else:\n return 8\n else:\n if(bacon_strategy(score, opponent_score, margin=6) == 0):\n return 0\n else:\n if(100 - score <= 88):\n return 3\n if (score > opponent_score):\n return 4\n return 6\n # END Question 10\n\n\n##########################\n# Command Line Interface #\n##########################\n\n\n# Note: 
Functions in this section do not need to be changed. They use features\n# of Python not yet covered in the course.\n\n\n@main\ndef run(*args):\n \"\"\"Read in the command-line argument and calls corresponding functions.\n\n This function uses Python syntax/techniques not yet covered in this course.\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description=\"Play Hog\")\n parser.add_argument('--run_experiments', '-r', action='store_true',\n help='Runs strategy experiments')\n\n args = parser.parse_args()\n\n if args.run_experiments:\n run_experiments()\n", "sub_path": "projects/hog/hog.py", "file_name": "hog.py", "file_ext": "py", "file_size_in_byte": 12026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "dice.six_sided", "line_number": 15, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 33, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 47, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 47, "usage_type": "call"}, {"api_name": "dice.four_sided", "line_number": 80, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 80, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 197, "usage_type": "name"}, {"api_name": "dice.six_sided", "line_number": 258, "usage_type": "argument"}, {"api_name": "dice.four_sided", "line_number": 260, "usage_type": "argument"}, {"api_name": "argparse.ArgumentParser", "line_number": 374, "usage_type": "call"}, {"api_name": "ucb.main", "line_number": 367, "usage_type": "name"}]} +{"seq_id": "485336027", "text": "\"\"\"\nInference APIs for the trained models.\n\"\"\"\n\nfrom typing import Callable\nimport torch\nimport torchvision.transforms as T\nfrom PIL import Image\n\n\n# Image Transform before inference.\ntransform = T.Compose([\n T.Resize(800),\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n\n# for output bounding box post-processing\ndef box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=1)\n\n\ndef rescale_bboxes(out_bbox, size):\n img_w, img_h = size\n b = box_cxcywh_to_xyxy(out_bbox)\n b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)\n return b\n\n\ndef detect(image: Image, model: Callable,\n transform: Callable, threshold=0.8):\n \"\"\"\n @param img: PIL Image\n @param model: A Serialized callable, exported using torchscript.\n @param transform: Data transformer function.\n @param threshold: Confidence of bbox prediction.\n \"\"\"\n # mean-std normalize the input image (batch-size: 1)\n image_tnsr = transform(image).unsqueeze(0)\n\n # propagate through the model\n outputs = model(image_tnsr)\n\n # keep only predictions with 0.7+ confidence\n # Skip the default background class added at train time.\n probas = outputs['pred_logits'].softmax(-1)[0, :, :-1]\n keep = probas.max(-1).values > threshold\n\n # convert boxes from [0; 1] to image scales\n bboxes_scaled = rescale_bboxes(outputs['pred_boxes'][0, keep], image.size)\n # bboxes = box_cxcywh_to_xyxy(outputs['pred_boxes'][0, keep])\n\n return probas[keep], bboxes_scaled\n", "sub_path": "source/pic2card/mystique/models/pth/detr/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 1658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torchvision.transforms.Compose", 
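The DETR post-processing above converts boxes from normalized (center-x, center-y, width, height) to pixel-space corner coordinates. A worked numeric example of the same two steps, using an invented 640x480 image size:

```python
import torch

def box_cxcywh_to_xyxy(x):
    # Same conversion as above: center/size -> top-left/bottom-right corners.
    x_c, y_c, w, h = x.unbind(1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
         (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=1)

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])              # normalized cxcywh
xyxy = box_cxcywh_to_xyxy(boxes)                          # [[0.4, 0.3, 0.6, 0.7]]
scale = torch.tensor([640, 480, 640, 480], dtype=torch.float32)
print(xyxy * scale)                                       # [[256., 144., 384., 336.]]
```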
"line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "514161184", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport re\nfrom flask import Flask, render_template, request, redirect, url_for, flash\nfrom autosignin import asi\napp = Flask(__name__)\napp.config.from_object('config')\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html',)\n\n\n@app.route('/get')\ndef get():\n list_ = asi.list()\n return render_template('list.html', datas=list_)\n\n\n@app.route('/edit/')\ndef edit(webname):\n return render_template('edit.html', data=asi.get(webname))\n\n\n@app.route('/del/')\ndef del_(webname):\n return render_template('del.html', webname=webname)\n\n\n@app.route('/delconfirm/')\ndef delconfirm(webname):\n asi.del_(webname)\n flash(u'%s 删除成功' % webname)\n return redirect(url_for('index'))\n\n\n@app.route('/set', methods=['POST'])\ndef set():\n if request.method == 'GET':\n return redirect('/')\n if request.method == 'POST':\n url = request.form['url']\n name = re.findall('[a-zA-Z]://([a-zA-Z0-9-._]+)', url)[0]\n method = request.form['method']\n data = request.form['data']\n cookies = request.form['cookies']\n if name in asi.data.sections():\n list_ = asi.get(name)\n if list_['url'] != url:\n asi.setSignInUrl(name, url)\n flash(u'%s url已修改' % name)\n if list_['data'] != data:\n asi.setData(name, data)\n flash(u'%s data已修改' % name)\n if list_['cookies'] != cookies:\n asi.setCookies(name, cookies)\n flash(u'%s cookies已修改' % name)\n if list_['method'] != method:\n asi.setMethod(name, method)\n flash(u'%s method已修改' % name)\n else:\n asi.add(name, url, method, cookies, data)\n flash(u'添加成功')\n return redirect(url_for('index'))\n", "sub_path": "web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 1930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "autosignin.asi.list", "line_number": 19, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "autosignin.asi.get", "line_number": 25, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 25, "usage_type": 
"name"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "autosignin.asi.del_", "line_number": 35, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "autosignin.asi.data.sections", "line_number": 50, "usage_type": "call"}, {"api_name": "autosignin.asi.data", "line_number": 50, "usage_type": "attribute"}, {"api_name": "autosignin.asi", "line_number": 50, "usage_type": "name"}, {"api_name": "autosignin.asi.get", "line_number": 51, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 51, "usage_type": "name"}, {"api_name": "autosignin.asi.setSignInUrl", "line_number": 53, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 54, "usage_type": "call"}, {"api_name": "autosignin.asi.setData", "line_number": 56, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 57, "usage_type": "call"}, {"api_name": "autosignin.asi.setCookies", "line_number": 59, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 60, "usage_type": "call"}, {"api_name": "autosignin.asi.setMethod", "line_number": 62, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 63, "usage_type": "call"}, {"api_name": "autosignin.asi.add", "line_number": 65, "usage_type": "call"}, {"api_name": "autosignin.asi", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "50187586", "text": "import os\n#import setup\nfrom flask import Flask, render_template, url_for, request, make_response\nimport boto3\nfrom dotenv import load_dotenv\n#from boto.mturk.connection import MTurkConnection\n#from boto.mturk.question import ExternalQuestion\n#from boto.mturk.qualification import Qualifications, 
PercentAssignmentsApprovedRequirement, NumberHitsApprovedRequirement\n#from boto.mturk.price import Price\n\n# load dotenv in the base root\nAPP_ROOT = os.path.join(os.path.dirname(__file__), '') # refers to application_top\ndotenv_path = os.path.join(APP_ROOT, '.env')\nload_dotenv(dotenv_path)\n\n#Start Configuration Variables\nAWS_ACCESS_KEY_ID = os.getenv('KEY')\nAWS_SECRET_ACCESS_KEY = os.getenv('SECRET_KEY')\nDEV_ENVIROMENT_BOOLEAN = True\nDEBUG = True\n#End Configuration Variables\n\n#This allows us to specify whether we are pushing to the sandbox or live site.\nif DEV_ENVIROMENT_BOOLEAN:\n AMAZON_HOST = \"https://mturk-requester-sandbox.us-east-1.amazonaws.com\"\nelse:\n AMAZON_HOST = \"https://mturk-requester.us-east-1.amazonaws.com\"\n\nconnection = boto3.client('mturk',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY,\n region_name='us-east-1',\n endpoint_url=AMAZON_HOST)\n\n#5 cents per HIT\namount = 0.05\n\n#frame_height in pixels\nframe_height = 800\n\n#Here, I create two sample qualifications\n#qualifications = Qualifications()\n#qualifications.add(PercentAssignmentsApprovedRequirement(comparator=\"GreaterThan\", integer_value=\"90\"))\n#qualifications.add(NumberHitsApprovedRequirement(comparator=\"GreaterThan\", integer_value=\"100\"))\n\n#This url will be the url of your application, with appropriate GET parameters\nurl = \"https://salty-journey-20160.herokuapp.com/\"\nquestionform = open(file='questions.xml',mode='r').read()\n\n#questionform = boto3.question.ExternalQuestion(url, frame_height)\ncreate_hit_result = connection.create_hit(\n Title=\"Annotate this monologue\",\n Description=\"Annotate a monologue\",\n Keywords=\"annotation, languages, ML\",\n #duration is in seconds\n LifetimeInSeconds = 60*60,\n #max_assignments will set the amount of independent copies of the task (turkers can only see one)\n MaxAssignments=15,\n Question=questionform,\n Reward='0.15',\n AssignmentDurationInSeconds = 600,\n AutoApprovalDelayInSeconds = 14400,\n #Determines information returned by method in API, not super important\n #response_groups=('Minimal', 'HITDetail'),\n #qualifications=qualifications,\n)\nprint (\"A new HIT has been created. 
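The script above ends by printing the HITId "(Use to Get Results)" but does not show that step. A hedged sketch of how results could be fetched with the same boto3 MTurk client API; `client` and `hit_id` stand for the `connection` object and the HITId created above:

```python
def fetch_submitted_answers(client, hit_id):
    # Poll the (sandbox) endpoint for assignments workers have submitted.
    response = client.list_assignments_for_hit(
        HITId=hit_id,
        AssignmentStatuses=['Submitted'],
        MaxResults=10,
    )
    # Each assignment's Answer field is a QuestionFormAnswers XML blob.
    return [(a['WorkerId'], a['Answer']) for a in response['Assignments']]
```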
You can preview it here:\")\nprint (\"https://workersandbox.mturk.com/mturk/preview?groupId=\" + create_hit_result['HIT']['HITGroupId'])\nprint (\"HITID = \" + create_hit_result['HIT']['HITId'] + \" (Use to Get Results)\")\n", "sub_path": "post_hits.py", "file_name": "post_hits.py", "file_ext": "py", "file_size_in_byte": 2732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "414026427", "text": "\"\"\"-----------------------------------------------------------------------------\nPURPOSE : SBL amendment process to record all Trade, Instrument, \n and Settlement updates done by the OPS SecLend and \n PCG Collateral user groups.\nREQUESTER, DEPATMENT : Jennitha Jugnath, PTS\nPROJECT : SBL onto FA\nDEVELOPER : Libor Svoboda\n--------------------------------------------------------------------------------\n\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n2020-05-26 CHG0102232 Libor Svoboda Initial implementation\n\"\"\"\nimport csv\nimport datetime\nimport os\nimport xml.etree.ElementTree as ET\nfrom collections import defaultdict\n\nimport acm\nfrom at_ael_variables import AelVariableHandler\nfrom at_ats_utils import XmlOutput\nfrom at_logging import getLogger\nfrom at_time import acm_date\n\n\nLOGGER = getLogger(__name__)\nTODAY = acm.Time.DateToday()\nYESTERDAY = acm.Time.DateAddDelta(TODAY, 0, 0, -1)\nDATE_CHOICES = {\n 'Custom Date': TODAY,\n 'Date Today': TODAY,\n 'Yesterday': YESTERDAY,\n}\nCOLUMNS = (\n 'Object Type',\n 'Object ID',\n 'Instrument',\n 'InsType',\n 'Operation',\n 'Update Time',\n 'Update User',\n 'User Group',\n 'Underlying',\n 'Counterparty',\n 'Amended Object Type',\n 'Amended Object ID',\n 'Amended Object Operation',\n 'Amended Field',\n 'Original Value',\n 'New Value',\n)\nMAPPING_ENTITY = {\n 'Object Type': 'ObjectType',\n 'Object ID': 'ObjectID',\n 'Operation': 'Operation',\n 'Update Time': 'UpdateTime',\n 'Update User': 'UpdateUser',\n 'User Group': 'UpdateUserGroup',\n 'Instrument': 'Instrument',\n 'InsType': 'InsType',\n 'Underlying': 'Underlying',\n 'Counterparty': 'Counterparty',\n}\nMAPPING_AMENDMENT = {\n 'Amended Object Type': 'TableName',\n 'Amended Object ID': 'Oid',\n 'Amended Object Operation': 'Operation',\n}\nMAPPING_FIELD = {\n 'Amended Field': 'FieldName',\n 'Original Value': 'OriginalValue',\n 'New Value': 'NewValue',\n}\n\n\ndef enable_custom_date(ael_input, custom_date_var):\n custom_date = ael_variables.get(custom_date_var)\n if ael_input.value == 'Custom Date':\n custom_date.enabled = True\n else:\n custom_date.enabled = False\n custom_date.value = TODAY\n\n\nael_variables = AelVariableHandler()\nael_variables.add(\n 'run_date',\n label='Run Date',\n 
collection=list(DATE_CHOICES.keys()),\n default='Date Today',\n cls='string',\n hook=lambda x: enable_custom_date(x, 'run_date_custom'),\n)\nael_variables.add(\n 'run_date_custom',\n label='Custom Run Date',\n cls='date',\n)\nael_variables.add(\n 'input_dir',\n label='Input Directory',\n default='/services/frontnt/BackOffice/Atlas-End-Of-Day/TradeAmendment',\n)\nael_variables.add(\n 'input_file',\n label='Input File',\n default='SBL_Amendments_{:%Y-%m-%d}.xml',\n)\nael_variables.add(\n 'output_dir',\n label='Output Directory',\n default='/services/frontnt/BackOffice/Atlas-End-Of-Day/TradeAmendment',\n)\nael_variables.add(\n 'output_file',\n label='Output File',\n default='SBL_Amendments_Report_{:%Y-%m-%d}.csv',\n)\n\n\ndef write_data_to_csv(xml_path, csv_path):\n output = []\n with open(xml_path, 'r') as xml_output:\n xml_string = xml_output.read()\n try:\n root = ET.fromstring(xml_string)\n except ET.ParseError:\n LOGGER.warning('Could not parse %s, adding closing tag.' % xml_path)\n root = ET.fromstring(xml_string + Output.close_tag)\n for entity in root.findall('Entity'):\n entity_dict = defaultdict(str)\n for csv_col, xml_tag in MAPPING_ENTITY.items():\n entity_dict[csv_col] = entity.find(xml_tag).text\n amendments = entity.findall('Amendment')\n if not len(amendments):\n output.append(entity_dict)\n continue\n for amendment in amendments:\n amendment_dict = defaultdict(str)\n amendment_dict.update(entity_dict)\n for csv_col, xml_tag in MAPPING_AMENDMENT.items():\n amendment_dict[csv_col] = amendment.find(xml_tag).text\n fields = amendment.findall('Field')\n if not len(fields):\n output.append(amendment_dict)\n continue\n for field in fields:\n field_dict = defaultdict(str)\n field_dict.update(amendment_dict)\n for csv_col, xml_tag in MAPPING_FIELD.items():\n field_dict[csv_col] = field.find(xml_tag).text\n output.append(field_dict)\n with open(csv_path, 'wb') as csvfile:\n csvwriter = csv.DictWriter(csvfile, COLUMNS)\n csvwriter.writeheader()\n for row in output:\n csvwriter.writerow(row)\n\n\ndef get_date(params, param_name):\n date_choice = params[param_name]\n if date_choice == 'Custom Date':\n return acm_date(params[param_name + '_custom'])\n return DATE_CHOICES[date_choice]\n\n\ndef get_path(folder, file_name, run_date):\n path = os.path.join(folder, file_name)\n dt = datetime.datetime(*acm.Time.DateToYMD(run_date))\n return path.format(dt)\n\n\ndef ael_main(ael_params):\n LOGGER.msg_tracker.reset()\n run_date = get_date(ael_params, 'run_date')\n input_path = get_path(ael_params['input_dir'], ael_params['input_file'], run_date)\n output_path = get_path(ael_params['output_dir'], ael_params['output_file'], run_date)\n LOGGER.info('Processing %s' % input_path)\n write_data_to_csv(input_path, output_path)\n LOGGER.info('Wrote secondary output to: %s' % output_path)\n if LOGGER.msg_tracker.errors_counter:\n raise RuntimeError('ERRORS occurred. 
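Two notes on `write_data_to_csv` above: on a truncated XML file it retries after appending a closing tag, but that branch references `Output.close_tag` while the module imports `XmlOutput`, so the recovery path looks broken as written; and the Entity -> Amendment -> Field flattening copies each parent's row into its children so every level still emits a row even when a child list is empty. A toy sketch of that flattening, with an invented two-level document and a subset of the column names:

```python
import xml.etree.ElementTree as ET
from collections import defaultdict

xml = """<Root><Entity><ObjectID>T1</ObjectID>
  <Amendment><Oid>7</Oid><Field><FieldName>Price</FieldName></Field></Amendment>
</Entity></Root>"""

rows = []
for entity in ET.fromstring(xml).findall('Entity'):
    row = defaultdict(str, {'Object ID': entity.find('ObjectID').text})
    for amendment in entity.findall('Amendment'):
        arow = defaultdict(str, row)            # inherit the entity columns
        arow['Amended Object ID'] = amendment.find('Oid').text
        for field in amendment.findall('Field'):
            frow = defaultdict(str, arow)       # inherit the amendment columns
            frow['Amended Field'] = field.find('FieldName').text
            rows.append(frow)

print(rows)  # one fully populated row per Field element
```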
Please check the log.')\n LOGGER.info('Completed successfully.')\n", "sub_path": "Python modules/sbl_amends_report.py", "file_name": "sbl_amends_report.py", "file_ext": "py", "file_size_in_byte": 5895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "at_logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "acm.Time.DateToday", "line_number": 30, "usage_type": "call"}, {"api_name": "acm.Time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "acm.Time.DateAddDelta", "line_number": 31, "usage_type": "call"}, {"api_name": "acm.Time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "at_ael_variables.AelVariableHandler", "line_number": 88, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 129, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 129, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 130, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 130, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 132, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 132, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 134, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 142, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 151, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 157, "usage_type": "call"}, {"api_name": "at_time.acm_date", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 172, "usage_type": "call"}, {"api_name": "acm.Time.DateToYMD", "line_number": 172, "usage_type": "call"}, {"api_name": "acm.Time", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "184651193", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import *\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\n\r\n\r\ndef varying_num_of_k_KNN(x_test,y_test,x_train,y_train):\r\n neighbors=np.arange(1,902,3)\r\n train_error=np.empty(len(neighbors))\r\n test_error=np.empty(len(neighbors))\r\n for i,k in enumerate(neighbors):\r\n knn=KNeighborsClassifier(n_neighbors=k)\r\n knn.fit(x_train,y_train)\r\n train_error[i]=1-knn.score(x_train,y_train)\r\n test_error[i]=1-knn.score(x_test,y_test)\r\n plt.title('Varying Number of Neighbors')\r\n plt.plot(1/neighbors,test_error,label='Testing Error')\r\n plt.plot(1/neighbors,train_error,label='Training Error')\r\n plt.legend()\r\n plt.xlabel('1/(Number of Neighbors)')\r\n plt.ylabel('Error')\r\n plt.show()\r\n\r\ndef KNN(x_test,y_test,x_train,y_train,k):\r\n knn=KNeighborsClassifier(n_neighbors=k)\r\n knn.fit(x_train,y_train)\r\n y_pred=knn.predict(x_train)\r\n labels=y_train.tolist()\r\n conf_mat=confusion_matrix(y_train,y_pred,labels=None)\r\n print(\"confusion matrix:\")\r\n print(conf_mat)\r\n print(\"True Positive:0.422\")\r\n print(\"True Negetive:0.578\")\r\n print(\"report:\")\r\n print(classification_report(y_train,y_pred))\r\n\r\ndef varying_of_training_set_KNN(x_test,y_test,train0,train1):\r\n 
NUM=np.arange(50,901,50)\r\n n=np.empty(len(NUM))\r\n for N in enumerate(NUM):\r\n train=mat(zeros((N,5)))\r\n train=np.row_stack((train0[N/2,5],train1[N/2,5]))\r\n x_train=mat(zeros((N,4)))\r\n y_train=mat(zeros((N,1)))\r\n for i in range(4):\r\n x_train[:,i]=train[:,i]\r\n y_train[:,0]=train[:,4]\r\n y_train=y_train.ravel()\r\n y_train=y_train.T\r\n neighbors=np.arange(1,N+1,40)\r\n train_error=np.empty(len(neighbors))\r\n test_error=np.empty(len(neighbors))\r\n for i,k in enumerate(neighbors):\r\n knn=KNeighborsClassifier(n_neighbors=k)\r\n knn.fit(x_train,y_train)\r\n train_error[i]=1-knn.score(x_train,y_train)\r\n test_error[i]=1-knn.score(x_test,y_test)\r\n n[N]=min(test_error)\r\n plt.title('Learning Curve')\r\n plt.plot(NUM,n)\r\n plt.legend()\r\n plt.xlabel('Number of Training set')\r\n plt.ylabel('Optimal K')\r\n plt.show()\r\n \r\n \r\n \r\n\r\nf=open(r\"C:\\Users\\samsung\\Desktop\\EE559\\h1\\banknote.txt\")\r\nfirst_ele=True\r\nfor data in f.readlines():\r\n data=data.strip('\\n')\r\n nums=data.split(\",\")\r\n if first_ele:\r\n nums=[float(x) for x in nums]\r\n matrix=np.array(nums)\r\n first_ele=False\r\n else:\r\n nums=[float(x) for x in nums]\r\n matrix=np.c_[matrix,nums]\r\nf.close()\r\nmatrix.shape=(5,1372)\r\nm0=mat(zeros((1372,5)))\r\nm1=mat(zeros((1372,5)))\r\nmatrix1=matrix.T\r\nj=0\r\nt=0\r\nfor i in range(1372):\r\n if matrix1[i,4]==0:\r\n m0[j,:]=matrix1[i,:]\r\n j=j+1\r\n else:\r\n m1[t,:]=matrix1[i,:]\r\n t=t+1\r\n\r\nclass0=mat(zeros((j,5)))\r\nclass1=mat(zeros((t,5)))\r\nfor i in range(j):\r\n class0[i,:]=m0[i,:]\r\nfor i in range(t):\r\n class1[i,:]=m1[i,:]\r\n\r\ntest0=mat(zeros((200,5)))\r\ntest1=mat(zeros((200,5)))\r\ntrain0=mat(zeros((j-200,5)))\r\ntrain1=mat(zeros((t-200,5)))\r\nfor i in range(200):\r\n test0[i,:]=class0[i,:]\r\n test1[i,:]=class1[i,:]\r\nm=0\r\nn=0\r\nfor i in range(200,j):\r\n train0[m,:]=class0[i,:]\r\n m=m+1\r\nfor i in range(200,t):\r\n train1[n,:]=class1[i,:]\r\n n=n+1\r\n\r\ntest=mat(zeros((400,5)))\r\ntrain=mat(zeros((j+t-400,5)))\r\ntest=np.row_stack((test0,test1))\r\ntrain=np.row_stack((train0,train1))\r\nx_test=mat(zeros((400,4)))\r\ny_test=mat(zeros((400,1)))\r\nx_train=mat(zeros((j+t-400,4)))\r\ny_train=mat(zeros((j+t-400,1)))\r\n\r\nfor i in range(4):\r\n x_test[:,i]=test[:,i]\r\n x_train[:,i]=train[:,i]\r\ny_test[:,0]=test[:,4]\r\ny_train[:,0]=train[:,4]\r\ny_test=y_test.ravel()\r\ny_train=y_train.ravel()\r\ny_test=y_test.T\r\ny_train=y_train.T\r\n\r\n\r\n## Train and test error\r\n#neighbors=np.arange(1,902,3)\r\n#train_error=np.empty(len(neighbors))\r\n#test_error=np.empty(len(neighbors))\r\n#for i,k in enumerate(neighbors):\r\n# knn=KNeighborsClassifier(n_neighbors=k)\r\n# knn.fit(x_train,y_train)\r\n# train_error[i]=1-knn.score(x_train,y_train)\r\n# test_error[i]=1-knn.score(x_test,y_test)\r\n#plt.title('Varying Number of Neighbors')\r\n#plt.plot(1/neighbors,test_error,label='Testing Error')\r\n#plt.plot(1/neighbors,train_error,label='Training Error')\r\n#plt.legend()\r\n#plt.xlabel('1/(Number of Neighbors)')\r\n#plt.ylabel('Error')\r\n#plt.show()\r\n\r\n## Confusion matrix, true positive rate, true negative rate, precision and F-score\r\n#knn=KNeighborsClassifier(n_neighbors=1)\r\n#knn.fit(x_train,y_train)\r\n#y_pred=knn.predict(x_train)\r\n#labels=y_train.tolist()\r\n#conf_mat=confusion_matrix(y_train,y_pred,labels=None)\r\n#print(\"confusion matrix:\")\r\n#print(conf_mat)\r\n#print(\"True Positive:0.422\")\r\n#print(\"True 
Negetive:0.578\")\r\n#print(\"report:\")\r\n#print(classification_report(y_train,y_pred))\r\n\r\n\r\n## Learning Curve\r\nNUM=np.arange(50,801,50)\r\nn=np.empty(len(NUM))\r\nfor M,N in enumerate(NUM):\r\n train=mat(zeros((N,5)))\r\n N1=int(N/2)\r\n train00=mat(zeros((N1,5)))\r\n train11=mat(zeros((N1,5)))\r\n for i in range(N1):\r\n train00[i,:]=train0[i,:]\r\n train11[i,:]=train1[i,:]\r\n train=np.row_stack((train00,train11))\r\n x_train=mat(zeros((N,4)))\r\n y_train=mat(zeros((N,1)))\r\n for i in range(4):\r\n x_train[:,i]=train[:,i]\r\n y_train[:,0]=train[:,4]\r\n y_train=y_train.ravel()\r\n y_train=y_train.T\r\n neighbors=np.arange(1,N+1,40)\r\n train_error=np.empty(len(neighbors))\r\n test_error=np.empty(len(neighbors))\r\n for i,k in enumerate(neighbors):\r\n knn=KNeighborsClassifier(n_neighbors=k)\r\n knn.fit(x_train,y_train)\r\n train_error[i]=1-knn.score(x_train,y_train)\r\n test_error[i]=1-knn.score(x_test,y_test)\r\n n[M]=test_error.tolist().index(min(test_error))+1\r\n #n[M]=min(test_error) # best error rate\r\nplt.title('Learning Curve')\r\nplt.plot(NUM,n)\r\nplt.legend()\r\nplt.xlabel('Number of Training set')\r\nplt.ylabel('Optimal K')\r\nplt.show()\r\n#best error rate\r\n#plt.plot(NUM,n,label='Test Error')\r\n#plt.legend()\r\n#plt.xlabel('N')\r\n#plt.ylabel('Error')\r\n#plt.show()\r\n", "sub_path": "KNN.py", "file_name": "KNN.py", "file_ext": "py", "file_size_in_byte": 6244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.arange", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.row_stack", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 54, 
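The module-level learning-curve loop above (which, unlike the dead `varying_of_training_set_KNN` helper, unpacks `enumerate` correctly as `for M, N in ...`) boils down to sweeping k, recording `1 - knn.score(...)` per split, and keeping the k with the lowest test error. A compact self-contained version of that sweep on synthetic data (`make_classification` is a stand-in for the banknote file):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=400, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)

ks = np.arange(1, 52, 10)
test_error = np.empty(len(ks))
for i, k in enumerate(ks):
    knn = KNeighborsClassifier(n_neighbors=k).fit(X_tr, y_tr)
    test_error[i] = 1 - knn.score(X_te, y_te)  # error = 1 - accuracy

best_k = ks[int(np.argmin(test_error))]        # the "optimal K" plotted above
print(best_k, test_error)
```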
"usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.row_stack", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.row_stack", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.row_stack", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 194, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}]} +{"seq_id": "599093144", "text": "import os\nimport sys\nfrom os import path\nfrom threading import Thread\nfrom typing import Dict\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom mixorama.bartender import Bartender, BartenderState, CocktailAbortedException\nfrom mixorama.recipes import Recipe\nfrom mixorama.statemachine import InvalidStateMachineTransition\n\n# cmdline arguments, logger and config setup are handled by mixorama\nos.environ[\"KIVY_NO_ARGS\"] = \"1\"\nos.environ[\"KIVY_NO_CONSOLELOG\"] = \"1\"\nos.environ[\"KIVY_NO_FILELOG\"] = \"1\"\nos.environ[\"KIVY_NO_CONFIG\"] = \"1\"\n\n# kivy thinks too 
much of itself, really...\nsys_stderr = sys.stderr\nlogging_root = logging.root\nkivy_logger_setlevel = logging.getLogger('kivy').setLevel\nlogging.getLogger('kivy').setLevel = lambda level: None\n\nfrom kivy.config import Config\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\n\n# restoring fucked up python settings\nsys.stderr = sys_stderr\nlogging.root = logging_root\nlogging.getLogger('kivy').setLevel = kivy_logger_setlevel\n\n\ndef is_gui_available():\n try:\n from kivy.core.window import Window\n return Window is not None\n except Exception:\n return False\n\n\ndef gui_config(config: Dict[str, Dict[str, str]]):\n for section, section_settings in config.items():\n for option, value in section_settings.items():\n Config.set(section, option, value)\n\n\ndef gui_run(menu, bartender):\n BartenderGuiApp(menu, bartender).run()\n\n\nclass BartenderGuiApp(App):\n def __init__(self, menu: Dict[str, Recipe], bartender: Bartender, **kwargs):\n super(BartenderGuiApp, self).__init__(**kwargs)\n self.menu = menu\n self.bartender = bartender\n\n def build(self):\n tpl_path = path.join(path.dirname(__file__), 'gui/layout.kv')\n Builder.load_file(tpl_path)\n return MainWidget(self.menu, self.bartender)\n\n\nclass TouchableButton(Button):\n def collide_point(self, x, y):\n parent = super(TouchableButton, self).collide_point(x, y)\n print('colliding touchable button @ ', x, y)\n return parent\n\n\nclass MainWidget(BoxLayout):\n menu_buttons = ObjectProperty(None)\n ''':type: kivy.uix.gridlayout.GridLayout'''\n image = ObjectProperty(None)\n ''':type: kivy.uix.image.Image'''\n total_progress = ObjectProperty(None)\n ''':type: kivy.uix.progressbar.ProgressBar'''\n step_progress = ObjectProperty(None)\n ''':type: kivy.uix.progressbar.ProgressBar'''\n abort_btn = ObjectProperty(None)\n ''':type: kivy.uix.button.Button'''\n make_btn = ObjectProperty(None)\n ''':type: kivy.uix.button.Button'''\n info_ul = ObjectProperty(None)\n ''':type: kivy.uix.label.Label'''\n info_ur = ObjectProperty(None)\n ''':type: kivy.uix.label.Label'''\n info_bl = ObjectProperty(None)\n ''':type: kivy.uix.label.Label'''\n info_br = ObjectProperty(None)\n ''':type: kivy.uix.label.Label'''\n\n staged_recipe = None\n ''':type: Recipe'''\n\n def __init__(self, menu: Dict[str, Recipe], bartender: Bartender, **kwargs):\n super(MainWidget, self).__init__(**kwargs)\n self.bartender = bartender\n self.menu = menu\n\n bartender.on_sm_transitions(\n enum=BartenderState,\n IDLE=self.on_idle,\n MAKING=self.on_making,\n READY=self.on_ready,\n POURING_PROGRESS=self.on_pouring_progress,\n ABORTED=self.on_abort\n )\n\n self.build_cocktail_buttons(menu)\n self.abort_btn.bind(on_press=self.on_abort_btn_press)\n self.make_btn.bind(on_press=self.on_make_btn_press)\n\n self.on_idle()\n self.stage_recipe(list(menu.values())[0])\n\n def build_cocktail_buttons(self, menu):\n for key, recipe in menu.items():\n horiz_size = 1 / 3 # arrange in 3 columns\n vert_size = horiz_size / len(menu) # percent of parent\n\n b = Button(text=recipe.name, size_hint=(horiz_size, vert_size),\n on_press=lambda *args, r=recipe: self.stage_recipe(r))\n\n self.menu_buttons.add_widget(b)\n\n def stage_recipe(self, recipe: Recipe):\n self.staged_recipe = recipe\n self.info_ul.text = 'Volume: {} ml'.format(recipe.volume())\n self.info_bl.text = 'Strength: {:.2f}%, ABV'.format(recipe.strength())\n\n if recipe.image:\n self.image.source = recipe.image\n\n 
def reset_progress(self, total=0, step=0):\n self.total_progress.value = total\n self.step_progress.value = step\n\n def on_abort_btn_press(self, target):\n try:\n self.bartender.abort()\n except InvalidStateMachineTransition as e:\n logger.exception(e)\n\n def on_make_btn_press(self, target):\n if self.staged_recipe:\n recipe_components = list(dict(self.staged_recipe.sequence).keys())\n\n def on_pouring(component):\n progress = (recipe_components.index(component) + 1) / len(recipe_components) # fraction of the recipe's steps completed\n self.total_progress.value = progress * 100\n\n self.bartender.on_sm_transitions(enum=BartenderState, POURING=on_pouring)\n\n def maker():\n try:\n self.bartender.make_drink(self.staged_recipe.sequence)\n self.bartender.serve()\n except CocktailAbortedException:\n self.bartender.discard()\n\n Thread(daemon=True, target=maker).start()\n\n def on_idle(self):\n self.make_btn.disabled = False\n self.abort_btn.disabled = True\n self.info_br.text = 'Ready to make drinks!'\n self.reset_progress()\n\n def on_making(self):\n self.make_btn.disabled = True\n self.abort_btn.disabled = False\n self.info_br.text = 'Making your drink ...'\n self.step_progress.value = 0\n\n def on_ready(self):\n self.info_br.text = 'Take your drink'\n\n def on_abort(self):\n self.info_br.text = 'Cocktail aborted. Please dump the glass contents'\n\n def on_pouring_progress(self, done, volume):\n self.step_progress.value = done / volume * 100\n", "sub_path": "mixorama/gui.py", "file_name": "gui.py", "file_ext": "py", "file_size_in_byte": 6138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.root", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.root", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "kivy.core.window.Window", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "kivy.config.Config.set", "line_number": 49, "usage_type": "call"}, {"api_name": "kivy.config.Config", "line_number": 49, "usage_type": "name"}, {"api_name": "kivy.app.App", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "mixorama.recipes.Recipe", "line_number": 57, "usage_type": "name"}, {"api_name": "mixorama.bartender.Bartender", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 63, "usage_type": "call"}, {"api_name": "kivy.lang.Builder.load_file", "line_number": 64, "usage_type": "call"}, {"api_name": "kivy.lang.Builder", "line_number": 64, "usage_type": "name"}, {"api_name": 
"kivy.uix.button.Button", "line_number": 68, "usage_type": "name"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 75, "usage_type": "name"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 76, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 78, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 80, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 82, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 84, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 86, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 88, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 90, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 92, "usage_type": "call"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 94, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 100, "usage_type": "name"}, {"api_name": "mixorama.recipes.Recipe", "line_number": 100, "usage_type": "name"}, {"api_name": "mixorama.bartender.Bartender", "line_number": 100, "usage_type": "name"}, {"api_name": "mixorama.bartender.BartenderState", "line_number": 106, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 126, "usage_type": "call"}, {"api_name": "mixorama.recipes.Recipe", "line_number": 131, "usage_type": "name"}, {"api_name": "mixorama.statemachine.InvalidStateMachineTransition", "line_number": 146, "usage_type": "name"}, {"api_name": "mixorama.bartender.BartenderState", "line_number": 157, "usage_type": "name"}, {"api_name": "mixorama.bartender.CocktailAbortedException", "line_number": 163, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "576890009", "text": "import pymongo as mongo\nimport bson.json_util as json\nimport webob\nimport bemtevi.database as database\n\ndef list(request):\n client = database.client()\n db = client.bemtevi\n collection = db.authors\n authors = collection.find()\n response = json.dumps(authors)\n return webob.Response(response, content_type = 'application/json')\n\ndef create(request):\n client = database.client()\n db = client.bemtevi\n collection = db.authors\n author = request.json_body\n collection.insert(author)\n response = json.dumps(author)\n return webob.Response(response, content_type = 'application/json')", "sub_path": "bemtevi/controllers/authors_controller.py", "file_name": "authors_controller.py", "file_ext": "py", "file_size_in_byte": 614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "bemtevi.database.client", "line_number": 7, "usage_type": "call"}, {"api_name": "bemtevi.database", "line_number": 7, "usage_type": "name"}, {"api_name": "bson.json_util.dumps", "line_number": 11, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 11, "usage_type": "name"}, {"api_name": "webob.Response", "line_number": 12, "usage_type": "call"}, {"api_name": "bemtevi.database.client", "line_number": 15, "usage_type": "call"}, {"api_name": "bemtevi.database", "line_number": 15, "usage_type": "name"}, {"api_name": "bson.json_util.dumps", "line_number": 20, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 20, "usage_type": "name"}, {"api_name": "webob.Response", "line_number": 21, 
"usage_type": "call"}]} +{"seq_id": "570458886", "text": "import traceback, requests,json\ndef decode_subid(subid):\n url = \"http://login.blendernetworks.com/subid_decode.php?auth=37a9c5c97a0ec96fc337507ba36fc2000aa5d95f&subids=\" + subid\n\n session = requests.Session()\n\n try:\n response = session.post(url)\n json_resp = json.loads(response.text)\n if len(json_resp)==0:\n return \"\"\n for encoded_source_id,decoded_row in json_resp.iteritems():\n pid = decoded_row['pid']\n sid = decoded_row['sid']\n decoded_subid = decoded_row['subid']\n return pid + \"_\" + sid + \"_\" + decoded_subid\n except:\n decode_subid(subid)\n\n", "sub_path": "Third_Party_Parsers/subid_decoder.py", "file_name": "subid_decoder.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.Session", "line_number": 5, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "520159401", "text": "import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # 查表\n lower_color = np.array([0, 0, 0])\n upper_color = np.array([180, 255, 46])\n\n # 让lower_red和upper_red范围之间的变为白色 别的范围的变为黑色\n mask = cv2.inRange(hsv, lower_color, upper_color)\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n kernel = np.ones((5, 5), np.uint8)\n\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel) # 开操作背景无噪点\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) # 闭操作识别到的区域无噪点\n\n cv2.imshow('frame', frame)\n cv2.imshow('res', res)\n cv2.imshow('opening', opening)\n cv2.imshow('closing', closing)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27: # Esc:27\n break\n\ncv2.destroyAllWindows()\ncap.release()\n", "sub_path": "开操作和闭操作.py", "file_name": "开操作和闭操作.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "650688654", "text": "\"\"\"\nCopyright ©2019. The Regents of the University of California (Regents). 
All Rights Reserved.\n\nPermission to use, copy, modify, and distribute this software and its documentation\nfor educational, research, and not-for-profit purposes, without fee and without a\nsigned licensing agreement, is hereby granted, provided that the above copyright\nnotice, this paragraph and the following two paragraphs appear in all copies,\nmodifications, and distributions.\n\nContact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue,\nSuite 510, Berkeley, CA 94720-1620, (510) 643-7201, otl@berkeley.edu,\nhttp://ipira.berkeley.edu/industry-info for commercial licensing opportunities.\n\nIN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,\nINCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF\nTHE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED\nOF THE POSSIBILITY OF SUCH DAMAGE.\n\nREGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE\nSOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED\n\"AS IS\". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,\nENHANCEMENTS, OR MODIFICATIONS.\n\"\"\"\n\nfrom copy import deepcopy\n\nfrom boac.api.util import authorized_users_api_feed\nfrom boac.externals import data_loch\nfrom boac.lib.berkeley import BERKELEY_DEPT_NAME_TO_CODE, COE_ETHNICITIES_PER_CODE, term_name_for_sis_id\nfrom boac.merged import athletics\nfrom boac.merged.calnet import get_csid_for_uid\nfrom boac.merged.student import get_student_query_scope\nfrom boac.models.authorized_user import AuthorizedUser\nfrom flask import current_app as app\nfrom flask_login import current_user\n\n\ndef translate_to_filter_options(owner_uid, criteria=None):\n rows = []\n if criteria:\n for definitions in _get_filter_options(get_student_query_scope(), owner_uid):\n for definition in definitions:\n selected = criteria.get(definition['key'])\n if selected is not None:\n if definition['type'] == 'array':\n for selection in selected:\n rows.append(_translate_filter_row(definition, selection))\n else:\n rows.append(_translate_filter_row(definition, selected))\n return rows\n\n\ndef get_cohort_filter_options(owner_uid, existing_filters):\n # Default menu has all options available. 
Options vary with cohort owner since the \"My Students\" filter includes\n # an list of the cohort owner's academic plans.\n filter_categories = _get_filter_options(get_student_query_scope(), owner_uid)\n menus = [menu for category in filter_categories for menu in category]\n for key in _keys_of_type_boolean(existing_filters):\n # Disable sub_menu options if they are already in cohort criteria\n for menu in menus:\n if menu['key'] == key:\n # Disable 'boolean' sub_menu (e.g., 'isInactiveCoe') if it is already in cohort criteria\n menu['disabled'] = True\n # Get filters of type 'range' (e.g., 'last name')\n for key, values in _selections_of_type('range', existing_filters).items():\n menu = next(s for s in menus if s['key'] == key)\n menu['disabled'] = True\n # Get filters of type 'array' (e.g., 'levels')\n for key, values in _selections_of_type('array', existing_filters).items():\n menu = next(s for s in menus if s['key'] == key)\n if len(values) == len(menu['options']):\n # If count of selected values equals number of options then disable the sub_menu\n menu['disabled'] = True\n for option in menu['options']:\n if option['value'] in values:\n # Disable sub_menu options that are already in cohort criteria\n option['disabled'] = True\n return filter_categories\n\n\ndef _get_filter_options(scope, cohort_owner_uid):\n all_dept_codes = list(BERKELEY_DEPT_NAME_TO_CODE.values())\n categories = [\n [\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'expectedGradTerms',\n 'name': 'Expected Graduation Term',\n 'options': _grad_terms,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'gpaRanges',\n 'name': 'GPA',\n 'options': _gpa_ranges,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'levels',\n 'name': 'Level',\n 'options': _class_levels,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'majors',\n 'name': 'Major',\n 'options': _majors,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'transfer',\n 'name': 'Transfer Student',\n 'options': [True, False],\n 'subcategoryHeader': 'Choose...',\n 'type': 'boolean',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'unitRanges',\n 'name': 'Units Completed',\n 'options': _unit_ranges,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n ],\n [\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'ethnicities',\n 'name': 'Ethnicity',\n 'options': _ethnicities,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'genders',\n 'name': 'Gender',\n 'options': _genders,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'underrepresented',\n 'name': 'Underrepresented Minority',\n 'options': [True, False],\n 'type': 'boolean',\n },\n ],\n [\n {\n 'availableTo': ['UWASC'],\n 'defaultValue': False if 'UWASC' in scope else None,\n 'key': 'isInactiveAsc',\n 'name': 'Inactive' if 'UWASC' in scope else 'Inactive (ASC)',\n 'options': [True, False],\n 'type': 'boolean',\n },\n {\n 'availableTo': ['UWASC'],\n 'defaultValue': None,\n 'key': 'inIntensiveCohort',\n 'name': 'Intensive',\n 'options': [True, False],\n 'type': 
'boolean',\n },\n {\n 'availableTo': ['UWASC'],\n 'defaultValue': None,\n 'key': 'groupCodes',\n 'name': 'Team',\n 'options': _team_groups,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n ],\n [\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coeAdvisorLdapUids',\n 'name': 'Advisor (COE)',\n 'options': _get_coe_profiles,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coeEthnicities',\n 'name': 'Ethnicity (COE)',\n 'options': _coe_ethnicities,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coeGenders',\n 'name': 'Gender (COE)',\n 'options': _coe_genders,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': False if 'COENG' in scope else None,\n 'key': 'isInactiveCoe',\n 'name': 'Inactive' if 'COENG' in scope else 'Inactive (COE)',\n 'options': [True, False],\n 'type': 'boolean',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'lastNameRange',\n 'name': 'Last Name',\n 'options': None,\n 'subcategoryHeader': ['Initials', 'through'],\n 'type': 'range',\n },\n {\n 'availableTo': all_dept_codes,\n 'defaultValue': None,\n 'key': 'cohortOwnerAcademicPlans',\n 'name': 'My Students',\n 'options': _academic_plans_for_cohort_owner(cohort_owner_uid),\n 'subcategoryHeader': 'Choose academic plan...',\n 'type': 'array',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coePrepStatuses',\n 'name': 'PREP',\n 'options': _coe_prep_statuses,\n 'subcategoryHeader': 'Choose...',\n 'type': 'array',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coeProbation',\n 'name': 'Probation',\n 'options': [True, False],\n 'type': 'boolean',\n },\n {\n 'availableTo': ['COENG'],\n 'defaultValue': None,\n 'key': 'coeUnderrepresented',\n 'name': 'Underrepresented Minority (COE)',\n 'options': [True, False],\n 'type': 'boolean',\n },\n ],\n ]\n available_categories = []\n\n def is_available(d):\n available = 'ADMIN' in scope or next((dept_code for dept_code in d['availableTo'] if dept_code in scope), False)\n if available:\n # If it is available then populate menu options\n options = d.pop('options')\n d['options'] = options() if callable(options) else options\n return available\n\n for category in categories:\n available_categories.append(list(filter(lambda d: is_available(d), category)))\n # Remove unavailable (ie, empty) categories\n return list(filter(lambda g: len(g), available_categories))\n\n\ndef _translate_filter_row(definition, selection=None):\n clone = deepcopy(definition)\n row = {k: clone.get(k) for k in ['key', 'name', 'options', 'subcategoryHeader', 'type']}\n if definition['type'] == 'array':\n option = next((o for o in row.get('options', []) if o['value'] == selection), None)\n if option:\n row['value'] = option['value']\n else:\n row['value'] = selection\n return row\n\n\ndef _keys_of_type_boolean(rows):\n # First, get selected 'boolean' options (e.g., 'coeProbation') from cohort criteria.\n existing_boolean_rows = list(filter(lambda row: row['type'] in ['boolean'], rows))\n return list(map(lambda r: r['key'], existing_boolean_rows))\n\n\ndef _selections_of_type(filter_type, existing_filters):\n rows = list(filter(lambda row: row['type'] in [filter_type], existing_filters))\n unique_keys = set(map(lambda row: row['key'], rows))\n selections = dict.fromkeys(unique_keys)\n for row in rows:\n key = 
row['key']\n if not selections[key]:\n selections[key] = []\n value = row.get('value')\n if value:\n selections[key].append(value)\n return selections\n\n\ndef _get_coe_profiles():\n users = list(filter(lambda _user: 'COENG' in _get_dept_codes(_user), AuthorizedUser.get_all_active_users()))\n profiles = []\n for user in authorized_users_api_feed(users):\n uid = user['uid']\n first_name = user.get('firstName')\n last_name = user.get('lastName')\n name = f'{first_name} {last_name}' if first_name or last_name else f'UID: {uid}'\n profiles.append({'name': name, 'value': uid})\n return sorted(profiles, key=lambda p: p['name'])\n\n\ndef _academic_plans_for_cohort_owner(owner_uid):\n if owner_uid:\n owner_csid = get_csid_for_uid(app, owner_uid)\n else:\n owner_csid = current_user.get_csid()\n plans = [\n {'name': 'All plans', 'value': '*'},\n ]\n plan_results = data_loch.get_academic_plans_for_advisor(owner_csid)\n for row in plan_results:\n value = row['academic_plan_code']\n if value:\n plans.append({'name': row['academic_plan'], 'value': value})\n return plans\n\n\ndef _unit_ranges():\n return [\n {'name': '0 - 29', 'value': 'numrange(NULL, 30, \\'[)\\')'},\n {'name': '30 - 59', 'value': 'numrange(30, 60, \\'[)\\')'},\n {'name': '60 - 89', 'value': 'numrange(60, 90, \\'[)\\')'},\n {'name': '90 - 119', 'value': 'numrange(90, 120, \\'[)\\')'},\n {'name': '120 +', 'value': 'numrange(120, NULL, \\'[)\\')'},\n ]\n\n\ndef _class_levels():\n return [\n {'name': 'Freshman (0-29 Units)', 'value': 'Freshman'},\n {'name': 'Sophomore (30-59 Units)', 'value': 'Sophomore'},\n {'name': 'Junior (60-89 Units)', 'value': 'Junior'},\n {'name': 'Senior (90+ Units)', 'value': 'Senior'},\n ]\n\n\ndef _coe_prep_statuses():\n return [\n {'name': 'PREP', 'value': 'did_prep'},\n {'name': 'PREP eligible', 'value': 'prep_eligible'},\n {'name': 'T-PREP', 'value': 'did_tprep'},\n {'name': 'T-PREP eligible', 'value': 'tprep_eligible'},\n ]\n\n\ndef _coe_genders():\n return [\n {'name': 'Female', 'value': 'F'},\n {'name': 'Male', 'value': 'M'},\n ]\n\n\ndef _ethnicities():\n return [{'name': row['ethnicity'], 'value': row['ethnicity']} for row in data_loch.get_distinct_ethnicities()]\n\n\ndef _genders():\n return [{'name': row['gender'], 'value': row['gender']} for row in data_loch.get_distinct_genders()]\n\n\ndef _grad_terms():\n term_ids = [r['expected_grad_term'] for r in data_loch.get_expected_graduation_terms()]\n return [{'name': term_name_for_sis_id(term_id), 'value': term_id} for term_id in term_ids]\n\n\ndef _gpa_ranges():\n return [\n {'name': '3.50 - 4.00', 'value': 'numrange(3.5, 4, \\'[]\\')'},\n {'name': '3.00 - 3.49', 'value': 'numrange(3, 3.5, \\'[)\\')'},\n {'name': '2.50 - 2.99', 'value': 'numrange(2.5, 3, \\'[)\\')'},\n {'name': '2.00 - 2.49', 'value': 'numrange(2, 2.5, \\'[)\\')'},\n {'name': 'Below 2.0', 'value': 'numrange(0, 2, \\'[)\\')'},\n ]\n\n\ndef _coe_ethnicities():\n rows = data_loch.get_coe_ethnicity_codes(['COENG'])\n key = 'ethnicity_code'\n\n def ethnicity(code):\n return COE_ETHNICITIES_PER_CODE.get(code)\n coe_ethnicities = [{'name': ethnicity(row[key]), 'value': row[key]} for row in rows]\n return sorted(coe_ethnicities, key=lambda e: e['name'])\n\n\ndef _team_groups():\n rows = athletics.all_team_groups()\n return [{'name': row['groupName'], 'value': row['groupCode']} for row in rows]\n\n\ndef _majors():\n major_results = [row['major'] for row in data_loch.get_majors()]\n return [{'name': major, 'value': major} for major in major_results]\n\n\ndef _get_dept_codes(user):\n return 
[m.university_dept.dept_code for m in user.department_memberships] if user else None\n", "sub_path": "boac/merged/cohort_filter_options.py", "file_name": "cohort_filter_options.py", "file_ext": "py", "file_size_in_byte": 16233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "boac.merged.student.get_student_query_scope", "line_number": 42, "usage_type": "call"}, {"api_name": "boac.merged.student.get_student_query_scope", "line_number": 57, "usage_type": "call"}, {"api_name": "boac.lib.berkeley.BERKELEY_DEPT_NAME_TO_CODE.values", "line_number": 83, "usage_type": "call"}, {"api_name": "boac.lib.berkeley.BERKELEY_DEPT_NAME_TO_CODE", "line_number": 83, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 294, "usage_type": "call"}, {"api_name": "boac.models.authorized_user.AuthorizedUser.get_all_active_users", "line_number": 326, "usage_type": "call"}, {"api_name": "boac.models.authorized_user.AuthorizedUser", "line_number": 326, "usage_type": "name"}, {"api_name": "boac.api.util.authorized_users_api_feed", "line_number": 328, "usage_type": "call"}, {"api_name": "boac.merged.calnet.get_csid_for_uid", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 339, "usage_type": "argument"}, {"api_name": "flask_login.current_user.get_csid", "line_number": 341, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 341, "usage_type": "name"}, {"api_name": "boac.externals.data_loch.get_academic_plans_for_advisor", "line_number": 345, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 345, "usage_type": "name"}, {"api_name": "boac.externals.data_loch.get_distinct_ethnicities", "line_number": 389, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 389, "usage_type": "name"}, {"api_name": "boac.externals.data_loch.get_distinct_genders", "line_number": 393, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 393, "usage_type": "name"}, {"api_name": "boac.externals.data_loch.get_expected_graduation_terms", "line_number": 397, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 397, "usage_type": "name"}, {"api_name": "boac.lib.berkeley.term_name_for_sis_id", "line_number": 398, "usage_type": "call"}, {"api_name": "boac.externals.data_loch.get_coe_ethnicity_codes", "line_number": 412, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 412, "usage_type": "name"}, {"api_name": "boac.lib.berkeley.COE_ETHNICITIES_PER_CODE.get", "line_number": 416, "usage_type": "call"}, {"api_name": "boac.lib.berkeley.COE_ETHNICITIES_PER_CODE", "line_number": 416, "usage_type": "name"}, {"api_name": "boac.merged.athletics.all_team_groups", "line_number": 422, "usage_type": "call"}, {"api_name": "boac.merged.athletics", "line_number": 422, "usage_type": "name"}, {"api_name": "boac.externals.data_loch.get_majors", "line_number": 427, "usage_type": "call"}, {"api_name": "boac.externals.data_loch", "line_number": 427, "usage_type": "name"}]} +{"seq_id": "85405036", "text": "from bs4 import BeautifulSoup\nfrom parse_drug_names import parse_drug_names\nfrom open_page_by_link import open_page_by_link\nfrom transliterate import slugify\nfrom json_utils import save_json_to_file\n\nWEBSITE_URL = 'https://www.rlsnet.ru'\nWEBSITE_NAME = 'rlsnet'\n\n\ndef get_reviews_url(drug_name):\n return f'{WEBSITE_URL}/comment/{drug_name}'\n\n\ndef 
parse_drug_reviews(drug_name):\n \"\"\"\n 1. Get reviews url\n 2. Parse reviews\n \"\"\"\n reviews = []\n drug_name_en = slugify(drug_name)\n reviews_link = get_reviews_url(drug_name_en)\n page = open_page_by_link(reviews_link)\n soup = BeautifulSoup(page, 'html.parser')\n reviews_list = soup.find_all('div', 'comment_text')\n for review in reviews_list:\n reviews.append({'comment': review.get_text()})\n return reviews\n\n\nif __name__ == \"__main__\":\n drug_names = parse_drug_names()\n for drug_name in drug_names:\n reviews_data = parse_drug_reviews(drug_name)\n if reviews_data:\n save_json_to_file(f'../data/{drug_name}_{WEBSITE_NAME}.json', reviews_data)\n", "sub_path": "parsers/rlsnet_parser.py", "file_name": "rlsnet_parser.py", "file_ext": "py", "file_size_in_byte": 1060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "transliterate.slugify", "line_number": 21, "usage_type": "call"}, {"api_name": "open_page_by_link.open_page_by_link", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "parse_drug_names.parse_drug_names", "line_number": 32, "usage_type": "call"}, {"api_name": "json_utils.save_json_to_file", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "304874221", "text": "import numpy as np\nfrom pymoo.model.sampling import Sampling\nfrom Solution import Solution\nimport config as cf\nfrom schedule_gen import ScheduleGen\n\n\nclass MyTcSampling(Sampling):\n def _do(self, problem, n_samples, **kwargs):\n schedules = ScheduleGen(\n cf.model[\"temp_min\"],\n cf.model[\"temp_max\"],\n cf.model[\"jump\"],\n cf.model[\"duration_min\"],\n cf.model[\"duration_max\"],\n cf.model[\"model_num\"],\n )\n\n X = np.full((n_samples, 1), None, dtype=np.object)\n\n for i in range(n_samples):\n schedule = schedules.test_case_generate()\n s = Solution()\n s.states = schedule\n\n X[i, 0] = s\n\n return X\n", "sub_path": "RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_MO/MyTcSampling.py", "file_name": "MyTcSampling.py", "file_ext": "py", "file_size_in_byte": 733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pymoo.model.sampling.Sampling", "line_number": 8, "usage_type": "name"}, {"api_name": "schedule_gen.ScheduleGen", "line_number": 10, "usage_type": "call"}, {"api_name": "config.model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.model", "line_number": 14, "usage_type": "attribute"}, {"api_name": "config.model", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.object", "line_number": 19, "usage_type": "attribute"}, {"api_name": "Solution.Solution", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "403685473", "text": "import os\nimport random\n\nfrom config import REPEAT_STIMS_PER_SESSION_TIMES\n\n\ndef create_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except OSError:\n print(\"Creation of the directory %s failed\" % dir_path)\n else:\n print(\"Successfully created the directory %s \" % dir_path)\n\n\ndef prepare_stims(stims):\n result = []\n\n for i in range(REPEAT_STIMS_PER_SESSION_TIMES):\n random.shuffle(stims)\n\n 
result = result + stims\n\n return result\n", "sub_path": "server/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.mkdir", "line_number": 9, "usage_type": "call"}, {"api_name": "config.REPEAT_STIMS_PER_SESSION_TIMES", "line_number": 19, "usage_type": "argument"}, {"api_name": "random.shuffle", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "134924577", "text": "from icubam import config\nfrom icubam.db import gsheets\nfrom absl.testing import absltest\nimport os\nimport unittest\n\n\nclass GsheetsTest(absltest.TestCase):\n gsheets_tokens_def = all([os.environ.get(key, False)\n for key in ['SHEET_ID', 'TOKEN_LOC']])\n\n def setUp(self):\n super().setUp()\n self.config = config.Config('resources/test.toml', mode='dev')\n\n @unittest.skipIf(not gsheets_tokens_def,\n \"SHEET_ID or TOKEN_LOC env variables not set\")\n def test_users(self):\n shdb = gsheets.SheetsDB(self.config.TOKEN_LOC, self.config.SHEET_ID)\n shdb.get_users()\n\n @unittest.skipIf(not gsheets_tokens_def,\n \"SHEET_ID or TOKEN_LOC env variables not set\")\n def test_icus(self):\n shdb = gsheets.SheetsDB(self.config.TOKEN_LOC, self.config.SHEET_ID)\n shdb.get_icus()\n\n\nif __name__ == \"__main__\":\n absltest.main()\n", "sub_path": "icubam/db/test_gsheets.py", "file_name": "test_gsheets.py", "file_ext": "py", "file_size_in_byte": 885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "absl.testing.absltest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "absl.testing.absltest", "line_number": 8, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "icubam.config.Config", "line_number": 14, "usage_type": "call"}, {"api_name": "icubam.config", "line_number": 14, "usage_type": "name"}, {"api_name": "icubam.db.gsheets.SheetsDB", "line_number": 19, "usage_type": "call"}, {"api_name": "icubam.db.gsheets", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.skipIf", "line_number": 16, "usage_type": "call"}, {"api_name": "icubam.db.gsheets.SheetsDB", "line_number": 25, "usage_type": "call"}, {"api_name": "icubam.db.gsheets", "line_number": 25, "usage_type": "name"}, {"api_name": "unittest.skipIf", "line_number": 22, "usage_type": "call"}, {"api_name": "absl.testing.absltest.main", "line_number": 30, "usage_type": "call"}, {"api_name": "absl.testing.absltest", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "618579270", "text": "import discord\r\nimport asyncio\r\nimport random\r\nimport pickle\r\nimport os\r\nimport time\r\nimport sys\r\nfrom discord import Game\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('------')\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\r\n\t#heads or tails\r\n\tif message.content.upper().startswith('!FLIP'):\r\n\t\tflip = random.choice(['heads', 'tails'])\r\n\t\tawait client.send_message(message.channel, flip)\r\n\r\n\t#display thots NSFW (limited to only mods and admins)\r\n\telif message.content.upper().startswith('!THOT'):\r\n\t\tif message.author.id == \"140582689651687424\" or message.author.id == \"239907769317195777\" or message.author.id == \"166392135699005441\" 
or message.author.id == \"140374213042110464\":\r\n\t\t\tthot = random.choice(['https://f4.bcbits.com/img/a4093565357_10.jpg', 'https://imgur.com/AJ1ybgC', ': https://cdn.discordapp.com/attachments/255678709393129472/373487680719421441/20446197_1489906441074593_1780441816_o.jpg', \r\n\t\t\t\t'https://cdn.discordapp.com/attachments/255678709393129472/373486342153306112/19576202_1463637137034857_776903082_n.gif', ': https://giant.gfycat.com/AdolescentCandidCoypu.webm', \r\n\t\t\t\t'http://i.imgur.com/NU5jC1l.jpg', 'https://cdn.discordapp.com/attachments/376887816581414922/376889614574813186/BOOTY.jpg',\r\n\t\t\t\t'http://naijaultimate.com/wp-content/uploads/2017/08/Nicki-Minaj-MTV.jpg', 'https://gfycat.com/DarlingInconsequentialAustralianshelduck',\r\n\t\t\t\t'https://cdn.discordapp.com/attachments/316399433736650752/351902439739490305/Nicki-Minaj-and-her-nipples-5.jpg', \r\n\t\t\t\t'https://cdn.discordapp.com/attachments/316399433736650752/347940166658424843/16123113_1807447696139564_7777549271787634688_n.png', 'http://i.imgur.com/1RzCaQ9.jpg',\r\n\t\t\t\t'https://cdn.discordapp.com/attachments/316399433736650752/327319394294431744/20170621_222956.png', 'https://cdn.discordapp.com/attachments/316399433736650752/327319394294431744/20170621_222956.png',\r\n\t\t\t\t'https://cdn.discordapp.com/attachments/316399433736650752/336752102711885824/unknown.png', 'http://www.gotceleb.com/wp-content/uploads/photos/danielle-bregoli/out-for-lunch-in-beverly-hills/Danielle-Bregoli-out-for-lunch--02.jpg',\r\n\t\t\t\t'https://cdn.discordapp.com/attachments/316399433736650752/334875903387762688/fft20_mf5544431.png', 'https://cdn.discordapp.com/attachments/316399433736650752/334195557792743435/image.jpg',\r\n\t\t\t\t'http://s8.favim.com/610/150227/black-beauty-black-girls-cool-girl-Favim.com-2515598.jpg', 'https://i.imgur.com/4aiX25b.jpg', 'http://ic.pics.livejournal.com/dringen/63450085/20640/20640_600.jpg', \r\n\t\t\t\t'https://i.imgur.com/0pgjkR2r.jpg', 'http://cdn2.cagepotato.com/wp-content/uploads/gallery/jennifer-nguyen/jennifer-nguyen-photos-sexy-ufc-ring-girl-01.jpg'])\r\n\t\t\tawait client.send_message(message.channel, thot)\r\n\t\telse:\r\n\t\t\tawait client.send_message(message.channel, \"Sorry, you must be an admin or moderator to use that command OR have special permission.\")\r\n\r\n\r\n\t#I do not associate with \r\n\telif message.content.upper().startswith('NIGGER'):\r\n\t\tuserID = message.author.id\r\n\t\tawait client.send_message(message.channel, \"<@%s> Do NOT be racial on my server.\" % (userID))\r\n\r\n\t#quote someone and store all quotes in quote_file.pk1\r\n\telif message.content.upper().startswith('!ADDQUOTE'):\r\n\t\tif not os.path.isfile(\"quote_file.pk1\"):\r\n\t\t\tdisc_quotes = []\r\n\t\telse:\r\n\t\t\twith open(\"quote_file.pk1\", \"rb\") as quote_file:\r\n\t\t\t\tdisc_quotes = pickle.load(quote_file) \r\n\t\tdisc_quotes.append(message.content[9:])\r\n\t\tuserID = message.author.id\r\n\t\tawait client.send_message(message.channel, \"<@%s> Your quote has been added :D\" % (userID))\r\n\t\twith open(\"quote_file.pk1\", \"wb\") as quote_file:\r\n\t\t\tpickle.dump(disc_quotes, quote_file)\r\n\r\n\t#display quote from quote_file.pk1\r\n\telif message.content.upper().startswith(\"!QUOTE\"):\r\n\t\twith open(\"quote_file.pk1\", \"rb\") as quote_file:\r\n\t\t\t\tdisc_quotes = pickle.load(quote_file)\r\n\t\tawait client.send_message(message.channel, random.choice(disc_quotes))\r\n\r\n\r\n#changes playing message to greet the newest member\tand sends a PM to whomever joined! 
\r\n@client.event\r\nasync def on_member_join(member):\r\n await client.change_presence(game=discord.Game(name='Hi %s' % (member)))\r\n await client.send_message(member, \"Hi %s, Welcome to Clark's Discord Server! Clark's server is fairly NSFW; just a warning. Enjoy your stay :)\" % (member))\r\n\r\n#changes playing message to a good bye\r\n@client.event\r\nasync def on_member_remove(member):\r\n await client.change_presence(game=discord.Game(name='Bye %s' % (member)))\r\n\t\r\n\r\n#needed to run bot \r\nclient.run('NDA2NjA5OTgzOTQyMTY0NDgw.DU7JXQ.myF4O104lZBGNYCsBLDeE9qQ6us')\r\n", "sub_path": "clark.py", "file_name": "clark.py", "file_ext": "py", "file_size_in_byte": 4665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "discord.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 58, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 63, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 68, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 75, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "444291178", "text": "import json\nimport os\nimport threading\n\nfrom lib.RemoteNode import RemoteNode\nfrom lib.commons.Utilities import progressBar\nfrom lib.core import autopsy_globals\nfrom lib.core.autopsy_globals import autopsy_logger\n\n__author__ = 'joshisk'\n\n\nclass TestbedNotFoundException(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return self.value\n\n\nclass Testbed:\n def __init__(self, tbContent):\n self.tbname = None\n self.tbFileName = None\n self.userid = None\n self.tenantid = None\n self.emailid = None\n self.host = None\n\n self.no_download_logs = False\n self.__im1_lock = threading.RLock()\n\n autopsy_logger.info(\"Building testbed\")\n if type(tbContent) is dict:\n self.parse_json(tbContent)\n self.tbFileName = self.tbname\n elif type(tbContent) is str:\n self.tbFileName = os.path.basename(tbContent)\n try:\n tbfile = open(tbContent)\n except IOError as e:\n autopsy_logger.critical(\"Testbed file doesn't exist: {0}\".format(tbContent))\n raise TestbedNotFoundException(\"Testbed file doesn't exist: {0}\".format(tbContent))\n try:\n self.parse_json(json.loads((tbfile.read())))\n except ValueError as e:\n autopsy_logger.critical(\"Testbed JSON file is not well formatted. 
Please check and try again\")\n raise e\n\n # self.createDeviceLogDir()\n\n def createDeviceLogDir(self):\n try:\n os.makedirs(autopsy_globals.autopsy_logloc + \"/\" + self.tbname + \"/\")\n except OSError as e:\n if \"File exists\" in e.message:\n autopsy_globals.autopsy_logger.debug(\"Dir already exists, continuing..\")\n else:\n autopsy_globals.autopsy_logger.critical(\"Error creating directory: \" + e.message)\n\n def openConnections(self):\n if not autopsy_globals.autopsy_quick_run:\n if self.host:\n map(lambda node: node.connect() if node else \"\", self.host)\n\n return True\n\n def parse_json(self, json_dict):\n hosts = json_dict['host'] if 'host' in json_dict else []\n\n self.tbname = json_dict['tbname'] if 'tbname' in json_dict else \"My_Testbed\"\n self.host = []\n\n for l_host in hosts:\n self.host.append(RemoteNode(hostname=l_host['name'],\n ipAddress=l_host['ip'],\n pkeyFile=l_host['key'] if 'key' in l_host else None,\n username=l_host['username'] if 'username' in l_host else 'ubuntu',\n password=l_host['password'] if 'password' in l_host else None,\n alias=l_host['alias'],\n ssh_port=int(l_host['ssh_port'] if 'ssh_port' in l_host else 22)))\n\n def __del__(self):\n if autopsy_globals is None:\n return\n self.close_connections(quick=autopsy_globals.autopsy_quick_run)\n\n def close_connections(self, quick=False):\n autopsy_logger.debug(\"Closing all connections of testbed\")\n\n if not (quick or self.no_download_logs):\n autopsy_logger.info(\"Downloading all logs, please wait....\")\n\n progressBar(0, 1)\n if self.host:\n map(lambda node: node.download_all_logs(autopsy_globals.autopsy_logfile), self.host)\n progressBar(1, 1)\n\n if self.host:\n map(lambda node: node.disconnect() if node else \"\", self.host)\n", "sub_path": "lib/Testbed.py", "file_name": "Testbed.py", "file_ext": "py", "file_size_in_byte": 3661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "threading.RLock", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.info", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.critical", "line_number": 42, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 42, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.critical", "line_number": 47, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 47, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 54, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logloc", "line_number": 54, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 54, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.debug", "line_number": 57, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 57, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 57, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.critical", "line_number": 59, "usage_type": "call"}, {"api_name": 
"lib.core.autopsy_globals.autopsy_logger", "line_number": 59, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 59, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_quick_run", "line_number": 62, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 62, "usage_type": "name"}, {"api_name": "lib.RemoteNode.RemoteNode", "line_number": 75, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals", "line_number": 84, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_quick_run", "line_number": 86, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 86, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 89, "usage_type": "name"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger.info", "line_number": 92, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logger", "line_number": 92, "usage_type": "name"}, {"api_name": "lib.commons.Utilities.progressBar", "line_number": 94, "usage_type": "call"}, {"api_name": "lib.core.autopsy_globals.autopsy_logfile", "line_number": 96, "usage_type": "attribute"}, {"api_name": "lib.core.autopsy_globals", "line_number": 96, "usage_type": "name"}, {"api_name": "lib.commons.Utilities.progressBar", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "89295171", "text": "import numpy as np\nimport cv2\n\ndef camera_in():\n \"\"\"\n 카메라 장치를 사용하는 함수\n 카메라로부터 받아온 매 프레임에 대해 반전 영상 생성\n \"\"\"\n cap = cv2.VideoCapture(0) # 컴퓨터에 연결되어 있는 기본 카메라 사용: 0\n\n # 카메라 장치 사용 가능여부 확인\n if not cap.isOpened():\n print(\"Camera open failed\")\n return\n # CAP_PROP_FRAME_WIDTH: 비디오 프레임의 가로 크기\n print(\"Frame width:\", int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))\n #CAP_PROP_FRAME_HEIGHT: 비디오 프레임의 세로 크기\n print(\"Frame height\", int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n while True:\n ret, frame = cap.read() # 비디오의 한 프레임씩 읽기\n # ret: 프레임 읽기 성공(True) 실패(False)\n # frame: 읽은 프레임\n if not ret:\n break\n\n inversed = ~frame # 프레임 반전\n\n cv2.imshow('frame', frame)\n cv2.imshow('inversed', inversed)\n\n # 10: 10초 ���안 대기\n # 27: ESC\n if cv2.waitKey(10) == 27:\n break\n\n cv2.destroyAllWindows()\n\ndef video_in():\n \"\"\"\n 동영상을 입력으로 받아 반전해 출력하는 함수\n \"\"\"\n cap = cv2.VideoCapture('stopwatch.avi')\n\n if not cap.isOpened():\n print(\"Video open failed\")\n return\n\n print(\"Frame width:\", int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))\n print(\"Frame height:\", int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n # CAP_PROP_FRAME_COUNT: 비디오 파일의 전체 프레임 수\n print(\"Frame count:\", int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n\n # CAP_PROP_FPS: 초당 프레임 수\n fps = cap.get(cv2.CAP_PROP_FPS)\n print(\"FPS:\", fps)\n\n # fps 값으로부터 각 프레임 사이의 시간 간격 delay(밀리초 단위)\n delay = round(1000 / fps)\n\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n\n inversed = ~frame\n\n cv2.imshow('frame', frame)\n cv2.imshow('inverse', inversed)\n\n if cv2.waitKey(delay) == 27:\n break\n\n cv2.destroyAllWindows()\n\ndef camera_in_video_out():\n \"\"\"\n 카메라로 프레임을 받아와 반전 시킨 후,\n 동영상 파일로 저장 하는 함수\n \"\"\"\n cap = cv2.VideoCapture(0)\n\n if not cap.isOpened():\n print('Camera open failed!')\n return\n\n w = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # https://www.fourcc.org/codecs.php 참고\n # DIVX: DivX MPEG-4 코덱\n fourcc = 
cv2.VideoWriter_fourcc(*'DIVX') # *'DIVX' == 'D', 'I', 'V', 'X'\n delay = round(1000 / fps)\n\n outputVideo = cv2.VideoWriter('output.avi', fourcc, fps, (w, h))\n\n if not outputVideo.isOpened():\n print(\"File open failed!\")\n return\n\n while True:\n ret, frame = cap.read()\n\n if not ret:\n break\n\n inversed = ~frame\n\n outputVideo.write(inversed)\n\n cv2.imshow('frame', frame)\n cv2.imshow('inversed', inversed)\n\n if cv2.waitKey(delay) == 27:\n break\n\n cv2.destroyAllWindows\n\n\nif __name__ == '__main__':\n camera_in()\n #video_in()\n # amera_in_video_out()", "sub_path": "ch4/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 3273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.VideoCapture", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 118, "usage_type": "attribute"}]} +{"seq_id": "193671368", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom .models import Booktitle, Bookitem, CircAccount\n\n# Functions need import\nfrom django.contrib import messages\nimport datetime\n\n# Authentication\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\ndef home(request):\n return render(request, 'home.html')\n\ndef login_page(request):\n form = AuthenticationForm()\n context = {\n 'form': form\n }\n if request.method == \"POST\":\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n user = 
form.get_user()\n login(request, user)\n return HttpResponseRedirect('/viewrecord')\n return render(request, 'login.html', context)\n\ndef logout_func(request):\n logout(request)\n return HttpResponseRedirect(\"/\")\n\n@login_required(login_url=\"/login\")\ndef view_record(request):\n username = request.user\n items = Bookitem.objects.filter(current_user=username)\n user_acc = CircAccount.objects.get(user=username)\n\n # number of items currently borrowed\n num = len(items)\n\n for item in items:\n item.display_message = \" \"\n n = datetime.date.today() - item.duedate\n if (n.days) > 0:\n item.display_message = \"Overdue {} day(s)\".format(n.days)\n\n context = {\n 'items':items,\n 'user_acc':user_acc,\n 'num': num,\n }\n return render(request, 'account.html', context)\n\n@login_required(login_url=\"/login\")\ndef renew_books(request):\n barcode = request.GET.get('barcode')\n item = Bookitem.objects.get(barcode=barcode)\n\n # get the new due date prepared for the update\n new_duedate = datetime.date.today() + datetime.timedelta(days=14)\n\n # get the number of days left before the due date\n n = item.duedate - datetime.date.today()\n\n # check if renewed today\n if (item.duedate == new_duedate):\n print(\"two dates are equal\")\n messages.warning(request, '\"{}\" has been renewed today'.format(item.title))\n return HttpResponseRedirect(\"/viewrecord\")\n elif (n.days > 10):\n print(\"Too early\")\n messages.warning(request, 'Too early to renew: \"{}\"'.format(item.title))\n return HttpResponseRedirect(\"/viewrecord\")\n else:\n # add a fine if the item is overdue\n days_of_overdue = datetime.date.today() - item.duedate\n print(days_of_overdue.days)\n if (days_of_overdue.days > 0):\n username = request.user\n user_acc = CircAccount.objects.get(user=username)\n user_acc.fine += int(days_of_overdue.days)\n user_acc.save()\n\n\n\n item.duedate = new_duedate\n item.renewal += 1\n item.save()\n return HttpResponseRedirect(\"/viewrecord\")\n\ndef book_record(request, id):\n book = Booktitle.objects.get(id=id)\n\n # convert to subject list\n subject_column = Booktitle._meta.get_field(\"subjects\")\n subject_data = subject_column.value_from_object(book)\n subject_list = subject_data.split(\"||\")\n\n # convert to isbn list\n isbn_column = Booktitle._meta.get_field(\"isbn\")\n isbn_data = isbn_column.value_from_object(book)\n isbn_list = isbn_data.split(\"||\")\n\n # check if edition is null\n if book.edition is None:\n book.edition = \"\"\n\n # get book items\n # parameter 'title' is linked to Booktitle\n items = Bookitem.objects.filter(title=id)\n\n # check if checked-out\n\n\n context = {\n 'book':book,\n 'subjects':subject_list,\n 'isbn':isbn_list,\n 'items':items\n }\n return render(request, 'book_record.html', context)\n\ndef searchBySubject(request, subject):\n books = Booktitle.objects.filter(subjects__contains=subject)\n num = len(books)\n context = {\n 'num': num,\n 'keyword':subject,\n 'books':books,\n }\n return render(request, 'search.html', context)\n\n\ndef searchBar(request):\n search_option = request.GET.get('search_option')\n search_words = request.GET.get('search_words')\n\n if (search_option==\"title\"):\n print(\"title search conducted\")\n books = Booktitle.objects.filter(title__icontains=search_words)\n if (search_option==\"author\"):\n print(\"author search conducted\")\n books = Booktitle.objects.filter(author__icontains=search_words)\n if (search_option==\"subject\"):\n print(\"subject search conducted\")\n books = Booktitle.objects.filter(subjects__icontains=search_words)\n if (search_option==\"isbn\"):\n print(\"isbn search 
conducted\")\n books = Booktitle.objects.filter(isbn__icontains=search_words)\n# if (search_option==\"any_words\"):\n# print(\"any_words search conducted\")\n# books = Booktitle.objects.filter(series__icontains=search_words)\n\n\n num = len(books)\n context = {\n 'num': num,\n 'keyword':search_words,\n 'books':books,\n }\n if (num == 1):\n for bk in books:\n return book_record(request, bk.id)\n return render(request, 'search.html', context)\n\n\ndef explain(request):\n return render(request, 'explain.html')", "sub_path": "catalogue/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Bookitem.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Bookitem.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Bookitem", "line_number": 38, "usage_type": "name"}, {"api_name": "models.CircAccount.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.CircAccount.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.CircAccount", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Bookitem.objects.get", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Bookitem.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Bookitem", "line_number": 60, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 63, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.contrib.messages.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 71, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.messages.warning", "line_number": 75, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 75, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 79, "usage_type": "call"}, {"api_name": 
"datetime.date", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.CircAccount.objects.get", "line_number": 83, "usage_type": "call"}, {"api_name": "models.CircAccount.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.CircAccount", "line_number": 83, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Booktitle.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 95, "usage_type": "name"}, {"api_name": "models.Booktitle._meta.get_field", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Booktitle._meta", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 98, "usage_type": "name"}, {"api_name": "models.Booktitle._meta.get_field", "line_number": 103, "usage_type": "call"}, {"api_name": "models.Booktitle._meta", "line_number": 103, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 103, "usage_type": "name"}, {"api_name": "models.Bookitem.objects.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "models.Bookitem.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "models.Bookitem", "line_number": 113, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Booktitle.objects.filter", "line_number": 127, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 127, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 134, "usage_type": "call"}, {"api_name": "models.Booktitle.objects.filter", "line_number": 143, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 143, "usage_type": "name"}, {"api_name": "models.Booktitle.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 146, "usage_type": "name"}, {"api_name": "models.Booktitle.objects.filter", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 149, "usage_type": "name"}, {"api_name": "models.Booktitle.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "models.Booktitle.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "models.Booktitle", "line_number": 152, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 167, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "19655595", "text": "import numpy as np\nimport plotly.graph_objects as go\n\n\ndef v0(x):\n return np.square(x) / 10\n\n\ndef v1(x):\n return np.abs(x)\n\n\ndef v2(x):\n return 12 * np.power(x / 10, 4) - np.power(x, 2) / 18 + x / 8 + 13/10\n\n\ndef v3(x):\n return 8 * np.abs(np.abs(np.abs(x) - 1) - 1)\n\n\ndef plot_eigenvectors(potential_name,\n eigenvectors,\n eigenvalues,\n xs,\n v_xs):\n\n 
plot_data = [go.Scatter(x=xs, y=v_xs, name=potential_name)]\n max_energy = 0\n for i, (eigenvalue, eigenvector) in enumerate(zip(eigenvalues, eigenvectors)):\n max_energy = max(max_energy, np.max(eigenvector) + eigenvalue)\n\n # add horizontal lines\n plot_data.append(go.Scatter(\n x=xs,\n y=np.full_like(eigenvector, fill_value=eigenvalue),\n mode='lines',\n line=dict(color='black'),\n showlegend=False,\n legendgroup=i,\n ))\n\n # add trace\n plot_data.append(go.Scatter(\n x=xs,\n y=eigenvector + eigenvalue,\n name='E_{}: {}'.format(i+1, str(round(eigenvalue, 6))),\n legendgroup=i,\n ))\n\n layout = go.Layout(\n title=potential_name,\n xaxis=dict(title='x'),\n yaxis=dict(title='Energy', range=[0, 1.1*max_energy]))\n\n fig = go.Figure(data=plot_data, layout=layout)\n fig.show()\n\n\ndef construct_matrix(num_points, potential_fn):\n # construct the -d^2 Psi / dx^2 + v(x) matrix\n matrix = np.zeros(shape=(num_points - 2, num_points - 2))\n\n # add -d^2 Psi / dx^2\n matrix[np.diag_indices_from(matrix)] = 2 / dx_squared\n diag = np.diagonal(matrix, 1)\n diag.setflags(write=True)\n diag.fill(-1 / dx_squared)\n diag = np.diagonal(matrix, -1)\n diag.setflags(write=True)\n diag.fill(-1 / dx_squared)\n\n # add v(x)\n v_xs = potential_fn(xs[1:-1])\n matrix[np.diag_indices_from(matrix)] += v_xs\n\n return matrix, v_xs\n\n\ndef construct_n_smallest_eigenpairs(matrix, n):\n\n eigenvalues, eigenvectors = np.linalg.eig(matrix)\n smallest_to_largest_idx = np.argsort(eigenvalues)\n eigenvalues = eigenvalues[smallest_to_largest_idx]\n eigenvectors = eigenvectors[:, smallest_to_largest_idx]\n return eigenvalues[:n], eigenvectors[:, :n]\n\n\npotentials = {\n 'f(x) = x^2/10': v0,\n 'f(x) = |x|': v1,\n 'f(x) = 12(x/10)^4 - x^2/18 + x/8 + 13/10': v2,\n 'f(x) = 8|||x|-1|-1|': v3}\n\nboundary = [-12, 12]\nboundary_conditions = [0, 0]\nnum_points = 1921\nxs = np.linspace(start=boundary[0], stop=boundary[1], num=num_points)\ndx = xs[1] - xs[0]\ndx_squared = np.square(dx)\n\n\n# for potential_name, potential_fn in potentials.items():\n#\n# matrix, v_xs = construct_matrix(num_points=num_points, potential_fn=potential_fn)\n#\n# n_smallest_eigenvalues, n_smallest_eigenvectors = construct_n_smallest_eigenpairs(\n# matrix=matrix, n=5)\n#\n# # plot eigenvectors\n# plot_eigenvectors(\n# potential_name=potential_name,\n# eigenvectors=n_smallest_eigenvectors.T,\n# eigenvalues=n_smallest_eigenvalues,\n# xs=xs,\n# v_xs=v_xs)\n\n\n# Part B\ndef composite_simpson_rule(y, dx):\n integral = 0\n for i in range(len(y)):\n if (i == 0) or (i == (len(y) - 1)):\n w = 1\n elif i % 2 == 1:\n w = 4\n elif i % 2 == 0:\n w = 2\n else:\n raise ValueError('This case should not be possible!')\n integral += w * y[i]\n return (dx / 3) * integral\n\n\nmatrix, v_xs = construct_matrix(num_points=num_points, potential_fn=potentials['f(x) = 12(x/10)^4 - x^2/18 + x/8 + 13/10'])\n\nn_smallest_eigenvalues, n_smallest_eigenvectors = construct_n_smallest_eigenpairs(\n matrix=matrix, n=5)\n\nfirst_index_greater_than_0 = np.argmax(xs > 0)\nlast_index_greater_than_6 = np.argmax(xs > 6)\n\nfor i, eigenvector in enumerate(n_smallest_eigenvectors.T):\n sqrd_eigenvector = np.square(eigenvector)\n total_area = composite_simpson_rule(y=sqrd_eigenvector, dx=dx)\n subrange_area = composite_simpson_rule(\n y=sqrd_eigenvector[first_index_greater_than_0:last_index_greater_than_6],\n dx=dx)\n print('Probability in Eigenmode {}: {}'.format(i+1, round(subrange_area / total_area, 4)))\n", "sub_path": "hw5/3.py", "file_name": "3.py", "file_ext": "py", "file_size_in_byte": 4231,
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.square", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 18, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 30, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.full_like", "line_number": 35, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 43, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 43, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Layout", "line_number": 50, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 50, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 55, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.diag_indices_from", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.diagonal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.diagonal", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.diag_indices_from", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "238957975", "text": "import cv2\nimport math\nimport numpy as np\nfrom collections import OrderedDict\nfrom shapely.geometry import Point\nfrom shapely.geometry.polygon import Polygon\n\n\nclass ObjectTracker():\n def __init__(self, id, detect_obj, max_lost):\n # detect_obj: {\"class_name\": cls, \"score\": score, \"bbox\": bbox, \"color\": COLOR[cls]}\n bbox = detect_obj['bbox']\n color = detect_obj['color']\n cls = detect_obj['class_name']\n self.bboxes = []\n self.centers = []\n self.lost_time = 0\n self.max_lost = max_lost\n self.tracker_id = id\n self.color = color\n self.class_name = cls\n self.update_loc(bbox)\n self.is_counted = 0\n print('Create ObjectTracker')\n\n def update_loc(self, bbox):\n if len(bbox) == 0:\n self.lost_time += 1\n else:\n self.bboxes.append(bbox)\n self.centers.append([(bbox[0] + bbox[2])//2, (bbox[1] + bbox[3])//2])\n self.lost_time = 0\n\n def draw_tracking(self, img):\n bbox = self.bboxes[-1]\n if self.lost_time == 0:\n cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), self.color)\n cv2.putText(img, \"{}\".format(self.tracker_id), (bbox[0], bbox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6,\n self.color)\n # else:\n # cv2.rectangle(img, (bbox[0], bbox[1]), 
(bbox[2], bbox[3]), (0, 0, 0))\n\n return img\n\n def distance(self, img, detect_obj):\n # detect_obj: {\"class_name\": cls, \"score\": score, \"bbox\": bbox, \"color\": COLOR[cls]}\n d = float(\"INF\")\n color = detect_obj['color']\n if color == self.color:\n bbox = detect_obj['bbox']\n center = ([(bbox[0] + bbox[2])//2, (bbox[1] + bbox[3])//2])\n dx = (center[0] - self.centers[-1][0])*1.0/img.shape[1]\n dy = (center[1] - self.centers[-1][1])*1.0/img.shape[0]\n d = math.fabs(dx) + 2 * math.fabs(dy)\n return d\n\n def lost(self, img=None):\n # for vehicle counting only\n if img is not None:\n if self.bboxes[-1][3] >= img.shape[0] * 0.9:\n self.lost_time = self.max_lost + 10\n return self.lost_time >= self.max_lost\n\n def do_count(self, counter_area):\n if self.is_counted or self.lost_time > 0 or len(counter_area) == 0:\n return self.class_name, 0\n obj_point = Point((self.bboxes[-1][0] + self.bboxes[-1][2])//2, self.bboxes[-1][3])\n polygon = Polygon(counter_area)\n if polygon.contains(obj_point):\n self.is_counted = True\n return self.class_name, 1\n else:\n return self.class_name, 0\n\n @staticmethod\n def create_tracker_by_name(tracker_type):\n tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\n # Create a tracker based on tracker name\n if tracker_type == tracker_types[0]:\n tracker = cv2.TrackerBoosting_create()\n elif tracker_type == tracker_types[1]:\n tracker = cv2.TrackerMIL_create()\n elif tracker_type == tracker_types[2]:\n tracker = cv2.TrackerKCF_create()\n elif tracker_type == tracker_types[3]:\n tracker = cv2.TrackerTLD_create()\n elif tracker_type == tracker_types[4]:\n tracker = cv2.TrackerMedianFlow_create()\n elif tracker_type == tracker_types[5]:\n tracker = cv2.TrackerGOTURN_create()\n elif tracker_type == tracker_types[6]:\n tracker = cv2.TrackerMOSSE_create()\n elif tracker_type == tracker_types[7]:\n tracker = cv2.TrackerCSRT_create()\n else:\n tracker = None\n print('Incorrect tracker name')\n print('Available trackers are:')\n for t in tracker_types:\n print(t)\n\n return tracker\n\n\nclass MultiObjectTracking():\n def __init__(self, max_lost=30, max_relative_distance=0.1):\n \"\"\"\n Create multi object tracker\n :param max_lost: Number frame the object did not appear before delete\n :param max_relative_distance: The max distance of same object (relative with width, height)\n \"\"\"\n self.list_tracker = OrderedDict()\n self.next_tracker_id = 0\n self.max_lost = max_lost\n self.max_relative_distance = max_relative_distance\n print('MultiObjectTracking')\n\n def update(self, img, list_det_res):\n # if there is no detected object --> all tracker lost\n if len(list_det_res) == 0:\n for tracker_id, tracker in self.list_tracker.items():\n tracker.update_loc([])\n if tracker.lost(img):\n self.de_register(tracker_id)\n return self.list_tracker\n\n # if the first time --> register all detected object as new tracker\n if len(self.list_tracker) == 0:\n for detect_obj in list_det_res:\n self.register(detect_obj)\n return self.list_tracker\n\n list_tracker_id = list(self.list_tracker.keys())\n\n # Get distance between tracker and detected object\n D = np.ones((len(self.list_tracker), len(list_det_res)), dtype=np.float)\n for y, tracker_id in enumerate(self.list_tracker.keys()):\n for x, detect_obj in enumerate(list_det_res):\n D[y][x] = self.list_tracker[tracker_id].distance(img, detect_obj)\n\n rows = D.min(axis=1).argsort()\n cols = D.argmin(axis=1)[rows]\n\n used_rows = set()\n used_cols = set()\n\n # loop over the combination of 
the (row, column) index tuples\n for (row, col) in zip(rows, cols):\n if row in used_rows or col in used_cols:\n continue\n\n # if distance between object and tracker too far --> ignore matching\n if D[row][col] >= self.max_relative_distance:\n continue\n\n obj_bbox = list_det_res[col]['bbox']\n tracker_id = list_tracker_id[row]\n self.list_tracker[tracker_id].update_loc(obj_bbox)\n used_rows.add(row)\n used_cols.add(col)\n\n # get all row(tracker) and column(object) index which are not examined (lost/ new tracker)\n un_used_tracker = set(range(0, len(self.list_tracker))).difference(used_rows)\n un_used_object = set(range(0, len(list_det_res))).difference(used_cols)\n\n # update and check lost tracker\n for index in un_used_tracker:\n tracker_id = list_tracker_id[index]\n self.list_tracker[tracker_id].update_loc([])\n if self.list_tracker[tracker_id].lost(img):\n self.de_register(tracker_id)\n\n # register new tracker\n for index in un_used_object:\n self.register(list_det_res[index])\n\n return self.list_tracker\n\n def register(self, detect_obj):\n tracker = ObjectTracker(self.next_tracker_id, detect_obj, self.max_lost)\n self.list_tracker[self.next_tracker_id] = tracker\n self.next_tracker_id += 1\n\n def de_register(self, tracker_id):\n del self.list_tracker[tracker_id]\n\n def draw_tracking(self, img):\n for tracker_id, tracker in self.list_tracker.items():\n img = tracker.draw_tracking(img)\n return img\n\n def count_object(self, counter_area, count_result):\n for tracker_id, tracker in self.list_tracker.items():\n cls, should_count = tracker.do_count(counter_area)\n if count_result.__contains__(cls):\n count_result[cls] += should_count\n else:\n count_result[cls] = should_count\n return count_result\n\n\n", "sub_path": "ITS/obj_tracking.py", "file_name": "obj_tracking.py", "file_ext": "py", "file_size_in_byte": 7655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.rectangle", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 38, "usage_type": "attribute"}, {"api_name": "math.fabs", "line_number": 54, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 67, "usage_type": "call"}, {"api_name": "shapely.geometry.polygon.Polygon", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.TrackerBoosting_create", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.TrackerMIL_create", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.TrackerKCF_create", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.TrackerTLD_create", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.TrackerMedianFlow_create", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.TrackerGOTURN_create", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.TrackerMOSSE_create", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.TrackerCSRT_create", "line_number": 94, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "31377053", "text": "\"\"\"\nsetup.py file for building armstrong components.\n\nNothing in this file should need to be edited, please see accompanying\npackage.json file if you need to adjust metadata about this 
package.\n\"\"\"\n\nfrom distutils.core import setup\nimport json\nimport os\n\ninfo = json.load(open(\"./package.json\"))\n\n\ndef convert_to_str(d):\n \"\"\"\n Recursively convert all values in a dictionary to strings\n\n This is required because setup() does not like unicode in\n the values it is supplied.\n \"\"\"\n d2 = {}\n for k, v in d.items():\n k = str(k)\n if type(v) in [list, tuple]:\n d2[k] = [str(a) for a in v]\n elif type(v) is dict:\n d2[k] = convert_to_str(v)\n else:\n d2[k] = str(v)\n return d2\n\ninfo = convert_to_str(info)\nNAMESPACE_PACKAGES = []\n\n\n# TODO: simplify this process\ndef generate_namespaces(package):\n new_package = \".\".join(package.split(\".\")[0:-1])\n if new_package.count(\".\") > 0:\n generate_namespaces(new_package)\n NAMESPACE_PACKAGES.append(new_package)\ngenerate_namespaces(info[\"name\"])\n\n\nif os.path.exists(\"MANIFEST\"):\n os.unlink(\"MANIFEST\")\n\n# Borrowed and modified from django-registration\n# Compile the list of packages available, because distutils doesn't have\n# an easy way to do this.\npackages, data_files = [], []\nroot_dir = os.path.dirname(__file__)\nif root_dir:\n os.chdir(root_dir)\n\n\ndef build_package(dirpath, dirnames, filenames):\n # Ignore dirnames that start with '.'\n for i, dirname in enumerate(dirnames):\n if dirname.startswith('.'):\n del dirnames[i]\n if '__init__.py' in filenames and 'steps.py' not in filenames:\n pkg = dirpath.replace(os.path.sep, '.')\n if os.path.altsep:\n pkg = pkg.replace(os.path.altsep, '.')\n packages.append(pkg)\n elif filenames:\n # Strip off the length of the package name plus the trailing slash\n prefix = dirpath[len(info[\"name\"]) + 1:]\n for f in filenames:\n # Ignore all dot files and any compiled\n if f.startswith(\".\") or f.endswith(\".pyc\"):\n continue\n data_files.append(os.path.join(prefix, f))\n\n\n[build_package(dirpath, dirnames, filenames) for dirpath, dirnames, filenames\n in os.walk(info[\"name\"].replace(\".\", \"/\"))]\n\nsetup_kwargs = {\n \"author\": \"Bay Citizen & Texas Tribune\",\n \"author_email\": \"dev@armstrongcms.org\",\n \"url\": \"http://github.com/armstrong/%s/\" % info[\"name\"],\n \"packages\": packages,\n \"package_data\": {info[\"name\"]: data_files, },\n \"namespace_packages\": NAMESPACE_PACKAGES,\n \"classifiers\": [\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n}\n\nsetup_kwargs.update(info)\nsetup(**setup_kwargs)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2979, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 79, "usage_type": "call"}, {"api_name": "distutils.core.setup", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "95836671", "text": "import json\nimport time\nfrom datetime import datetime\n\nfrom celery import group\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom django.db.models import F\nfrom ocldev.oclfleximporter import OclFlexImporter\nfrom pydash import compact, get\n\nfrom core.collections.models import Collection\nfrom core.common.constants import HEAD\nfrom core.common.services import RedisService\nfrom core.common.tasks import bulk_import_parts_inline\nfrom core.common.utils import drop_version\nfrom core.concepts.models import Concept\nfrom core.mappings.models import Mapping\nfrom core.orgs.models import Organization\nfrom core.sources.models import Source\nfrom core.users.models import UserProfile\n\nlogger = get_task_logger(__name__)\n\n\nclass ImportResults:\n def __init__(self, importer):\n self.json = json.loads(importer.import_results.to_json())\n self.detailed_summary = importer.import_results.get_detailed_summary()\n self.report = importer.import_results.display_report()\n\n def to_dict(self):\n return dict(\n json=self.json, detailed_summary=self.detailed_summary, report=self.report\n )\n\n\nclass BaseImporter:\n def __init__(\n self, content, username, update_if_exists, user=None, parse_data=True, set_user=True\n ): # pylint: disable=too-many-arguments\n self.input_list = []\n self.user = None\n self.result = None\n self.importer = None\n self.content = content\n self.username = username\n self.update_if_exists = update_if_exists\n if parse_data:\n self.populate_input_list()\n\n if set_user:\n self.set_user()\n if user:\n self.user = user\n\n def populate_input_list(self):\n if isinstance(self.content, list):\n self.input_list = self.content\n else:\n for line in self.content.splitlines():\n self.input_list.append(json.loads(line))\n\n def set_user(self):\n self.user = UserProfile.objects.get(username=self.username)\n\n def run(self):\n raise NotImplementedError()\n\n\nclass BulkImport(BaseImporter):\n def __init__(self, content, username, update_if_exists):\n super().__init__(content, username, update_if_exists)\n self.initialize_importer()\n\n def initialize_importer(self):\n self.importer = OclFlexImporter(\n input_list=self.input_list,\n api_url_root=settings.API_BASE_URL,\n api_token=self.user.get_token(),\n do_update_if_exists=self.update_if_exists\n )\n\n def run(self):\n self.importer.process()\n self.result = ImportResults(self.importer)\n\n return self.result.to_dict()\n\n\nCREATED = 1\nUPDATED = 2\nFAILED = 3\n\n\nclass BaseResourceImporter:\n mandatory_fields = set()\n allowed_fields = []\n\n def __init__(self, data, user, update_if_exists=False):\n self.user = user\n self.data = data\n self.update_if_exists = update_if_exists\n self.queryset = None\n\n def get(self, attr, default_value=None):\n return self.data.get(attr, default_value)\n\n def parse(self):\n self.data = self.get_filter_allowed_fields()\n self.data['created_by'] = self.data['updated_by'] = self.user\n\n def get_filter_allowed_fields(self):\n return {k: v for k, v in self.data.items() if k in self.allowed_fields}\n\n def is_valid(self):\n return self.mandatory_fields.issubset(self.data.keys())\n\n def get_owner_type(self):\n return self.get('owner_type', 
'').lower()\n\n def is_user_owner(self):\n return self.get_owner_type() == 'user'\n\n def is_org_owner(self):\n return self.get_owner_type() == 'organization'\n\n def get_owner_type_filter(self):\n if self.is_user_owner():\n return 'user__username'\n\n return 'organization__mnemonic'\n\n def get_owner(self):\n owner = self.get('owner')\n\n if self.is_org_owner():\n return Organization.objects.filter(mnemonic=owner).first()\n\n return UserProfile.objects.filter(username=owner).first()\n\n @staticmethod\n def exists():\n return False\n\n def clean(self):\n if not self.is_valid():\n return False\n if self.exists():\n return None\n\n self.parse()\n return True\n\n def run(self):\n is_clean = self.clean()\n if not is_clean:\n return is_clean\n\n return self.process()\n\n def process(self):\n raise NotImplementedError()\n\n\nclass OrganizationImporter(BaseResourceImporter):\n mandatory_fields = {'id', 'name'}\n allowed_fields = [\"id\", \"company\", \"extras\", \"location\", \"name\", \"public_access\", \"website\"]\n\n def exists(self):\n return Organization.objects.filter(mnemonic=self.get('id')).exists()\n\n def parse(self):\n super().parse()\n self.data['mnemonic'] = self.data.pop('id')\n\n def process(self):\n org = Organization.objects.create(**self.data)\n if org:\n return CREATED\n return FAILED\n\n\nclass SourceImporter(BaseResourceImporter):\n mandatory_fields = {'id', 'short_code', 'name', 'full_name', 'owner_type', 'owner', 'source_type'}\n allowed_fields = [\n \"id\", \"short_code\", \"name\", \"full_name\", \"description\", \"source_type\", \"custom_validation_schema\",\n \"public_access\", \"default_locale\", \"supported_locales\", \"website\", \"extras\", \"external_id\",\n ]\n\n def exists(self):\n return Source.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner'), 'mnemonic': self.get('id')}\n ).exists()\n\n def parse(self):\n owner_type = self.get('owner_type').lower()\n owner = self.get_owner()\n\n super().parse()\n\n self.data['mnemonic'] = self.data.pop('id')\n self.data[owner_type] = owner\n self.data['version'] = 'HEAD'\n\n supported_locales = self.get('supported_locales')\n if isinstance(supported_locales, str):\n self.data['supported_locales'] = supported_locales.split(',')\n\n self.data.pop('short_code')\n\n def process(self):\n source = Source(**self.data)\n errors = Source.persist_new(source, self.user)\n return errors or CREATED\n\n\nclass SourceVersionImporter(BaseResourceImporter):\n mandatory_fields = {\"id\"}\n allowed_fields = [\"id\", \"external_id\", \"description\", \"released\"]\n\n def exists(self):\n return Source.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner'),\n 'mnemonic': self.get('source'), 'version': self.get('id')}\n ).exists()\n\n def parse(self):\n owner_type = self.get('owner_type').lower()\n owner = self.get_owner()\n source = self.get('source')\n\n super().parse()\n\n self.data['version'] = self.data.pop('id')\n self.data['mnemonic'] = source\n self.data[owner_type] = owner\n\n def process(self):\n source = Source(**self.data)\n errors = Source.persist_new_version(source, self.user)\n return errors or UPDATED\n\n\nclass CollectionImporter(BaseResourceImporter):\n mandatory_fields = {'id', 'short_code', 'name', 'full_name', 'owner_type', 'owner', 'collection_type'}\n allowed_fields = [\n \"id\", \"short_code\", \"name\", \"full_name\", \"description\", \"collection_type\", \"custom_validation_schema\",\n \"public_access\", \"default_locale\", \"supported_locales\", \"website\", \"extras\", \"external_id\",\n 
]\n\n def exists(self):\n return Collection.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner'), 'mnemonic': self.get('id')}\n ).exists()\n\n def parse(self):\n owner_type = self.get('owner_type').lower()\n owner = self.get_owner()\n\n super().parse()\n\n self.data['mnemonic'] = self.data.pop('id')\n self.data[owner_type] = owner\n self.data['version'] = 'HEAD'\n\n supported_locales = self.get('supported_locales')\n if isinstance(supported_locales, str):\n self.data['supported_locales'] = supported_locales.split(',')\n\n self.data.pop('short_code')\n\n def process(self):\n coll = Collection(**self.data)\n errors = Collection.persist_new(coll, self.user)\n return errors or CREATED\n\n\nclass CollectionVersionImporter(BaseResourceImporter):\n mandatory_fields = {\"id\"}\n allowed_fields = [\"id\", \"external_id\", \"description\", \"released\"]\n\n def exists(self):\n return Collection.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner'),\n 'mnemonic': self.get('collection'), 'version': self.get('id')}\n ).exists()\n\n def parse(self):\n owner_type = self.get('owner_type').lower()\n owner = self.get_owner()\n collection = self.get('collection')\n\n super().parse()\n\n self.data['version'] = self.data.pop('id')\n self.data['mnemonic'] = collection\n self.data[owner_type] = owner\n\n def process(self):\n coll = Collection(**self.data)\n errors = Collection.persist_new_version(coll, self.user)\n return errors or UPDATED\n\n\nclass ConceptImporter(BaseResourceImporter):\n mandatory_fields = {\"id\"}\n allowed_fields = [\"id\", \"external_id\", \"concept_class\", \"datatype\", \"names\", \"descriptions\", \"retired\", \"extras\"]\n\n def __init__(self, data, user, update_if_exists):\n super().__init__(data, user, update_if_exists)\n self.version = False\n\n def exists(self):\n return self.get_queryset().exists()\n\n def get_queryset(self):\n if self.queryset:\n return self.queryset\n\n self.queryset = Concept.objects.filter(\n **{'parent__' + self.get_owner_type_filter(): self.get('owner'),\n 'parent__mnemonic': self.get('source'),\n 'mnemonic': self.get('id'), 'id': F('versioned_object_id')}\n )\n return self.queryset\n\n def parse(self):\n source = Source.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner')}, mnemonic=self.get('source'), version=HEAD\n ).first()\n super().parse()\n self.data['parent'] = source\n self.data['name'] = self.data['mnemonic'] = self.data.pop('id')\n\n def clean(self):\n if not self.is_valid():\n return False\n if self.exists() and self.update_if_exists:\n self.version = True\n\n self.parse()\n return True\n\n def process(self):\n if self.version:\n instance = self.get_queryset().first().clone()\n errors = Concept.create_new_version_for(instance, self.data, self.user)\n return errors or UPDATED\n\n instance = Concept.persist_new(self.data, self.user)\n if instance.id:\n return CREATED\n return instance.errors or FAILED\n\n\nclass MappingImporter(BaseResourceImporter):\n mandatory_fields = {\"map_type\", \"from_concept_url\"}\n allowed_fields = [\n \"id\", \"map_type\", \"from_concept_url\", \"to_source_url\", \"to_concept_url\", \"to_concept_code\",\n \"to_concept_name\", \"extras\", \"external_id\"\n ]\n\n def __init__(self, data, user, update_if_exists):\n super().__init__(data, user, update_if_exists)\n self.version = False\n\n def exists(self):\n return self.get_queryset().exists()\n\n def get_queryset(self):\n if self.queryset:\n return self.queryset\n\n from_concept_url = self.get('from_concept_url')\n 
to_concept_url = self.get('to_concept_url')\n to_concept_code = self.get('to_concept_code')\n to_source_url = self.get('to_source_url')\n filters = {\n 'parent__' + self.get_owner_type_filter(): self.get('owner'),\n 'parent__mnemonic': self.get('source'),\n 'id': F('versioned_object_id'),\n 'map_type': self.get('map_type'),\n 'from_concept__uri__icontains': drop_version(from_concept_url),\n }\n if to_concept_url:\n filters['to_concept__uri__icontains'] = drop_version(to_concept_url)\n if to_concept_code and to_source_url:\n filters['to_concept_code'] = to_concept_code\n filters['to_source__uri__icontains'] = drop_version(to_source_url)\n\n self.queryset = Mapping.objects.filter(**filters)\n\n return self.queryset\n\n def parse(self):\n source = Source.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner')}, mnemonic=self.get('source'), version=HEAD\n ).first()\n self.data = self.get_filter_allowed_fields()\n self.data['parent'] = source\n\n if self.get('id'):\n self.data['mnemonic'] = self.data.pop('id')\n\n def clean(self):\n if not self.is_valid():\n return False\n if self.exists() and self.update_if_exists:\n self.version = True\n\n self.parse()\n return True\n\n def process(self):\n if self.version:\n instance = self.get_queryset().first().clone()\n errors = Mapping.create_new_version_for(instance, self.data, self.user)\n return errors or UPDATED\n instance = Mapping.persist_new(self.data, self.user)\n if instance.id:\n return CREATED\n return instance.errors or FAILED\n\n\nclass ReferenceImporter(BaseResourceImporter):\n mandatory_fields = {\"data\"}\n allowed_fields = [\"data\", \"collection\", \"owner\", \"owner_type\", \"__cascade\", \"collection_url\"]\n\n def exists(self):\n return False\n\n def get_queryset(self):\n if self.queryset:\n return self.queryset\n\n if self.get('collection', None):\n self.queryset = Collection.objects.filter(\n **{self.get_owner_type_filter(): self.get('owner')}, mnemonic=self.get('collection'), version='HEAD'\n )\n elif self.get('collection_url', None):\n self.queryset = Collection.objects.filter(uri=self.get('collection_url'))\n\n return self.queryset\n\n def process(self):\n collection = self.get_queryset().first()\n\n if collection:\n (added_references, _) = collection.add_expressions(\n self.get('data'), settings.API_BASE_URL, self.user, self.get('__cascade', False)\n )\n for ref in added_references:\n if ref.concepts:\n for concept in ref.concepts:\n concept.save()\n if ref.mappings:\n for mapping in ref.mappings:\n mapping.save()\n\n return CREATED\n return FAILED\n\n\nclass BulkImportInline(BaseImporter):\n def __init__( # pylint: disable=too-many-arguments\n self, content, username, update_if_exists=False, input_list=None, user=None, set_user=True,\n self_task_id=None\n ):\n super().__init__(content, username, update_if_exists, user, not bool(input_list), set_user)\n self.self_task_id = self_task_id\n if input_list:\n self.input_list = input_list\n self.unknown = []\n self.invalid = []\n self.exists = []\n self.created = []\n self.updated = []\n self.failed = []\n self.exception = []\n self.others = []\n self.processed = 0\n self.total = len(self.input_list)\n self.start_time = time.time()\n self.elapsed_seconds = 0\n\n def handle_item_import_result(self, result, item):\n if result is None:\n self.exists.append(item)\n return\n if result is False:\n self.invalid.append(item)\n return\n if result == FAILED:\n self.failed.append(item)\n return\n if isinstance(result, dict):\n item['errors'] = result\n self.failed.append(item)\n 
return\n if result == CREATED:\n self.created.append(item)\n return\n if result == UPDATED:\n self.updated.append(item)\n return\n\n print(\"****Unexpected Result****\", result)\n self.others.append(item)\n\n def notify_progress(self):\n if self.self_task_id:\n service = RedisService()\n service.set(self.self_task_id, self.processed)\n\n def run(self):\n if self.self_task_id:\n print(\"****STARTED SUBPROCESS****\")\n print(\"TASK ID: {}\".format(self.self_task_id))\n print(\"***************\")\n for original_item in self.input_list:\n self.processed += 1\n logger.info('Processing %s of %s', str(self.processed), str(self.total))\n self.notify_progress()\n item = original_item.copy()\n item_type = item.pop('type', '').lower()\n if not item_type:\n self.unknown.append(original_item)\n if item_type == 'organization':\n self.handle_item_import_result(\n OrganizationImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'source':\n self.handle_item_import_result(\n SourceImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'source version':\n self.handle_item_import_result(\n SourceVersionImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'collection':\n self.handle_item_import_result(\n CollectionImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'collection version':\n self.handle_item_import_result(\n CollectionVersionImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'concept':\n self.handle_item_import_result(\n ConceptImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'mapping':\n self.handle_item_import_result(\n MappingImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n if item_type == 'reference':\n self.handle_item_import_result(\n ReferenceImporter(item, self.user, self.update_if_exists).run(), original_item\n )\n continue\n\n self.elapsed_seconds = time.time() - self.start_time\n\n self.make_result()\n\n return self.result\n\n @property\n def detailed_summary(self):\n return \"Processed: {}/{} | Created: {} | Updated: {} | Existing: {} | Time: {}secs\".format(\n self.processed, self.total, len(self.created), len(self.updated), len(self.exists), self.elapsed_seconds\n )\n\n @property\n def json_result(self):\n return dict(\n total=self.total, processed=self.processed, created=self.created, updated=self.updated,\n invalid=self.invalid, exists=self.exists, failed=self.failed, exception=self.exception,\n others=self.others, unknown=self.unknown, elapsed_seconds=self.elapsed_seconds\n )\n\n @property\n def report(self):\n return {\n k: len(v) if isinstance(v, list) else v for k, v in self.json_result.items()\n }\n\n def make_result(self):\n self.result = dict(\n json=self.json_result, detailed_summary=self.detailed_summary, report=self.report\n )\n\n\nclass BulkImportParallelRunner(BaseImporter): # pragma: no cover\n def __init__(\n self, content, username, update_if_exists, parallel=None, self_task_id=None\n ): # pylint: disable=too-many-arguments\n super().__init__(content, username, update_if_exists, None, False)\n self.start_time = time.time()\n self.self_task_id = self_task_id\n self.username = username\n self.total = 0\n self.resource_distribution = dict()\n self.parallel = int(parallel) if parallel else 5\n self.tasks = []\n self.groups = []\n 
self.results = []\n self.elapsed_seconds = 0\n self.resource_wise_time = dict()\n self.parts = [[]]\n self.result = None\n self._json_result = None\n self.redis_service = RedisService()\n if self.content:\n self.input_list = self.content.splitlines()\n self.total = len(self.input_list)\n self.make_resource_distribution()\n self.make_parts()\n\n def make_resource_distribution(self):\n for line in self.input_list:\n data = json.loads(line)\n data_type = data['type']\n if data_type not in self.resource_distribution:\n self.resource_distribution[data_type] = []\n self.resource_distribution[data_type].append(data)\n\n def make_parts(self):\n prev_line = None\n orgs = self.resource_distribution.get('Organization', None)\n sources = self.resource_distribution.get('Source', None)\n collections = self.resource_distribution.get('Collection', None)\n if orgs:\n self.parts = [orgs]\n if sources:\n self.parts.append(sources)\n if collections:\n self.parts.append(collections)\n\n self.parts = compact(self.parts)\n\n self.parts.append([])\n\n for data in self.input_list:\n line = json.loads(data)\n data_type = line.get('type', None).lower()\n if data_type not in ['organization', 'source', 'collection']:\n if prev_line:\n prev_type = prev_line.get('type').lower()\n if prev_type == data_type or (\n data_type not in ['concept', 'mapping'] and prev_type not in ['concept', 'mapping']\n ):\n self.parts[-1].append(line)\n else:\n self.parts.append([line])\n else:\n self.parts[-1].append(line)\n prev_line = line\n\n self.parts = compact(self.parts)\n\n @staticmethod\n def chunker_list(seq, size):\n return (seq[i::size] for i in range(size))\n\n def is_any_process_alive(self):\n if not self.groups:\n return False\n\n result = True\n\n try:\n result = any(grp.completed_count() != len(grp) for grp in self.groups)\n except: # pylint: disable=bare-except\n pass\n\n return result\n\n def get_overall_tasks_progress(self):\n total_processed = 0\n if not self.tasks:\n return total_processed\n\n for task in self.tasks:\n try:\n if task.task_id:\n total_processed += self.redis_service.get_int(task.task_id)\n except: # pylint: disable=bare-except\n pass\n\n return total_processed\n\n def get_details_to_notify(self):\n summary = \"Started: {} | Processed: {}/{} | Time: {}secs\".format(\n self.start_time_formatted, self.get_overall_tasks_progress(), self.total, self.elapsed_seconds\n )\n\n return dict(summary=summary)\n\n def get_sub_task_ids(self):\n return {task.task_id: task.state for task in self.tasks}\n\n def notify_progress(self):\n if self.self_task_id:\n try:\n self.redis_service.set_json(self.self_task_id, self.get_details_to_notify())\n except: # pylint: disable=bare-except\n pass\n\n def wait_till_tasks_alive(self):\n while self.is_any_process_alive():\n self.update_elapsed_seconds()\n self.notify_progress()\n time.sleep(1)\n\n def run(self):\n if self.self_task_id:\n print(\"****STARTED MAIN****\")\n print(\"TASK ID: {}\".format(self.self_task_id))\n print(\"***************\")\n for part_list in self.parts:\n if part_list:\n part_type = get(part_list, '0.type', '').lower()\n if part_type:\n is_child = part_type in ['concept', 'mapping']\n start_time = time.time()\n self.queue_tasks(part_list, is_child)\n self.wait_till_tasks_alive()\n if is_child:\n if part_type not in self.resource_wise_time:\n self.resource_wise_time[part_type] = 0\n self.resource_wise_time[part_type] += (time.time() - start_time)\n\n self.update_elapsed_seconds()\n\n self.make_result()\n\n return self.result\n\n def 
update_elapsed_seconds(self):\n self.elapsed_seconds = time.time() - self.start_time\n\n @property\n def detailed_summary(self):\n result = self.json_result\n return \"Started: {} | Processed: {}/{} | Created: {} | Updated: {} | Existing: {} | Time: {}secs\".format(\n self.start_time_formatted, result.get('processed'), result.get('total'),\n len(result.get('created')), len(result.get('updated')), len(result.get('exists')), self.elapsed_seconds\n )\n\n @property\n def start_time_formatted(self):\n return datetime.fromtimestamp(self.start_time)\n\n @property\n def json_result(self):\n if self._json_result:\n return self._json_result\n\n total_result = dict(\n total=0, processed=0, created=[], updated=[],\n invalid=[], exists=[], failed=[], exception=[],\n others=[], unknown=[], elapsed_seconds=self.elapsed_seconds\n )\n for task in self.tasks:\n result = task.result.get('json')\n for key in total_result:\n total_result[key] += result.get(key)\n\n total_result['start_time'] = self.start_time_formatted\n total_result['elapsed_seconds'] = self.elapsed_seconds\n total_result['child_resource_time_distribution'] = self.resource_wise_time\n self._json_result = total_result\n return self._json_result\n\n @property\n def report(self):\n data = {\n k: len(v) if isinstance(v, list) else v for k, v in self.json_result.items()\n }\n\n data['child_resource_time_distribution'] = self.resource_wise_time\n\n return data\n\n def make_result(self):\n self.result = dict(\n json=self.json_result, detailed_summary=self.detailed_summary, report=self.report\n )\n\n def queue_tasks(self, part_list, is_child):\n if is_child:\n chunked_lists = self.chunker_list(part_list, self.parallel)\n else:\n chunked_lists = [part_list]\n\n chunked_lists = compact(chunked_lists)\n\n queue = 'concurrent'\n jobs = group(bulk_import_parts_inline.s(_list, self.username, self.update_if_exists) for _list in chunked_lists)\n group_result = jobs.apply_async(queue=queue)\n self.groups.append(group_result)\n self.tasks += group_result.results\n", "sub_path": "core/importers/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 27156, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "celery.utils.log.get_task_logger", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "core.users.models.UserProfile.objects.get", "line_number": 65, "usage_type": "call"}, {"api_name": "core.users.models.UserProfile.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "core.users.models.UserProfile", "line_number": 65, "usage_type": "name"}, {"api_name": "ocldev.oclfleximporter.OclFlexImporter", "line_number": 77, "usage_type": "call"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 79, "usage_type": "name"}, {"api_name": "core.orgs.models.Organization.objects.filter", "line_number": 138, "usage_type": "call"}, {"api_name": "core.orgs.models.Organization.objects", "line_number": 138, "usage_type": "attribute"}, {"api_name": "core.orgs.models.Organization", "line_number": 138, "usage_type": "name"}, {"api_name": "core.users.models.UserProfile.objects.filter", "line_number": 140, "usage_type": "call"}, {"api_name": "core.users.models.UserProfile.objects", "line_number": 140, "usage_type": "attribute"}, 
{"api_name": "core.users.models.UserProfile", "line_number": 140, "usage_type": "name"}, {"api_name": "core.orgs.models.Organization.objects.filter", "line_number": 171, "usage_type": "call"}, {"api_name": "core.orgs.models.Organization.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "core.orgs.models.Organization", "line_number": 171, "usage_type": "name"}, {"api_name": "core.orgs.models.Organization.objects.create", "line_number": 178, "usage_type": "call"}, {"api_name": "core.orgs.models.Organization.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "core.orgs.models.Organization", "line_number": 178, "usage_type": "name"}, {"api_name": "core.sources.models.Source.objects.filter", "line_number": 192, "usage_type": "call"}, {"api_name": "core.sources.models.Source.objects", "line_number": 192, "usage_type": "attribute"}, {"api_name": "core.sources.models.Source", "line_number": 192, "usage_type": "name"}, {"api_name": "core.sources.models.Source", "line_number": 213, "usage_type": "call"}, {"api_name": "core.sources.models.Source.persist_new", "line_number": 214, "usage_type": "call"}, {"api_name": "core.sources.models.Source", "line_number": 214, "usage_type": "name"}, {"api_name": "core.sources.models.Source.objects.filter", "line_number": 223, "usage_type": "call"}, {"api_name": "core.sources.models.Source.objects", "line_number": 223, "usage_type": "attribute"}, {"api_name": "core.sources.models.Source", "line_number": 223, "usage_type": "name"}, {"api_name": "core.sources.models.Source", "line_number": 240, "usage_type": "call"}, {"api_name": "core.sources.models.Source.persist_new_version", "line_number": 241, "usage_type": "call"}, {"api_name": "core.sources.models.Source", "line_number": 241, "usage_type": "name"}, {"api_name": "core.collections.models.Collection.objects.filter", "line_number": 253, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.objects", "line_number": 253, "usage_type": "attribute"}, {"api_name": "core.collections.models.Collection", "line_number": 253, "usage_type": "name"}, {"api_name": "core.collections.models.Collection", "line_number": 274, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.persist_new", "line_number": 275, "usage_type": "call"}, {"api_name": "core.collections.models.Collection", "line_number": 275, "usage_type": "name"}, {"api_name": "core.collections.models.Collection.objects.filter", "line_number": 284, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.objects", "line_number": 284, "usage_type": "attribute"}, {"api_name": "core.collections.models.Collection", "line_number": 284, "usage_type": "name"}, {"api_name": "core.collections.models.Collection", "line_number": 301, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.persist_new_version", "line_number": 302, "usage_type": "call"}, {"api_name": "core.collections.models.Collection", "line_number": 302, "usage_type": "name"}, {"api_name": "core.concepts.models.Concept.objects.filter", "line_number": 321, "usage_type": "call"}, {"api_name": "core.concepts.models.Concept.objects", "line_number": 321, "usage_type": "attribute"}, {"api_name": "core.concepts.models.Concept", "line_number": 321, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 324, "usage_type": "call"}, {"api_name": "core.sources.models.Source.objects.filter", "line_number": 329, "usage_type": "call"}, {"api_name": "core.sources.models.Source.objects", "line_number": 
329, "usage_type": "attribute"}, {"api_name": "core.sources.models.Source", "line_number": 329, "usage_type": "name"}, {"api_name": "core.common.constants.HEAD", "line_number": 330, "usage_type": "name"}, {"api_name": "core.concepts.models.Concept.create_new_version_for", "line_number": 348, "usage_type": "call"}, {"api_name": "core.concepts.models.Concept", "line_number": 348, "usage_type": "name"}, {"api_name": "core.concepts.models.Concept.persist_new", "line_number": 351, "usage_type": "call"}, {"api_name": "core.concepts.models.Concept", "line_number": 351, "usage_type": "name"}, {"api_name": "django.db.models.F", "line_number": 382, "usage_type": "call"}, {"api_name": "core.common.utils.drop_version", "line_number": 384, "usage_type": "call"}, {"api_name": "core.common.utils.drop_version", "line_number": 387, "usage_type": "call"}, {"api_name": "core.common.utils.drop_version", "line_number": 390, "usage_type": "call"}, {"api_name": "core.mappings.models.Mapping.objects.filter", "line_number": 392, "usage_type": "call"}, {"api_name": "core.mappings.models.Mapping.objects", "line_number": 392, "usage_type": "attribute"}, {"api_name": "core.mappings.models.Mapping", "line_number": 392, "usage_type": "name"}, {"api_name": "core.sources.models.Source.objects.filter", "line_number": 397, "usage_type": "call"}, {"api_name": "core.sources.models.Source.objects", "line_number": 397, "usage_type": "attribute"}, {"api_name": "core.sources.models.Source", "line_number": 397, "usage_type": "name"}, {"api_name": "core.common.constants.HEAD", "line_number": 398, "usage_type": "name"}, {"api_name": "core.mappings.models.Mapping.create_new_version_for", "line_number": 418, "usage_type": "call"}, {"api_name": "core.mappings.models.Mapping", "line_number": 418, "usage_type": "name"}, {"api_name": "core.mappings.models.Mapping.persist_new", "line_number": 420, "usage_type": "call"}, {"api_name": "core.mappings.models.Mapping", "line_number": 420, "usage_type": "name"}, {"api_name": "core.collections.models.Collection.objects.filter", "line_number": 438, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.objects", "line_number": 438, "usage_type": "attribute"}, {"api_name": "core.collections.models.Collection", "line_number": 438, "usage_type": "name"}, {"api_name": "core.collections.models.Collection.objects.filter", "line_number": 442, "usage_type": "call"}, {"api_name": "core.collections.models.Collection.objects", "line_number": 442, "usage_type": "attribute"}, {"api_name": "core.collections.models.Collection", "line_number": 442, "usage_type": "name"}, {"api_name": "django.conf.settings.API_BASE_URL", "line_number": 451, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 451, "usage_type": "name"}, {"api_name": "time.time", "line_number": 484, "usage_type": "call"}, {"api_name": "core.common.services.RedisService", "line_number": 513, "usage_type": "call"}, {"api_name": "time.time", "line_number": 570, "usage_type": "call"}, {"api_name": "time.time", "line_number": 607, "usage_type": "call"}, {"api_name": "core.common.services.RedisService", "line_number": 621, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 630, "usage_type": "call"}, {"api_name": "pydash.compact", "line_number": 648, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 653, "usage_type": "call"}, {"api_name": "pydash.compact", "line_number": 668, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 722, "usage_type": "call"}, {"api_name": 
"pydash.get", "line_number": 731, "usage_type": "call"}, {"api_name": "time.time", "line_number": 734, "usage_type": "call"}, {"api_name": "time.time", "line_number": 740, "usage_type": "call"}, {"api_name": "time.time", "line_number": 749, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 761, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 761, "usage_type": "name"}, {"api_name": "pydash.compact", "line_number": 805, "usage_type": "call"}, {"api_name": "celery.group", "line_number": 808, "usage_type": "call"}, {"api_name": "core.common.tasks.bulk_import_parts_inline.s", "line_number": 808, "usage_type": "call"}, {"api_name": "core.common.tasks.bulk_import_parts_inline", "line_number": 808, "usage_type": "name"}]} +{"seq_id": "429591972", "text": "#!/usr/bin/env python2\n\nimport pygame as pg\nimport sys\nimport constants as const\nimport boneC\nimport frameC\nimport utils\n\ndef framenumGen(val=1):\n \n while True:\n yield val\n val += 1\n\ndef parent(parent_bone, child_bone):\n \n child_bone.parent = parent_bone\n parent_bone.children_list.append(child_bone)\n\n # fix pivot points and handle points\n if parent_bone.type == const.TYPE_CHIEF_BONE and parent_bone.SP_SEL:\n utils.snapToParentTranslator(parent_bone, child_bone, 1, -1, True)\n child_bone.wunderkind = True\n\n else:\n utils.snapToParent(parent_bone, child_bone, 1, -1)\n \"\"\"\n del_x = (child_bone.pos[0] - parent_bone.handle.pos[0])\n del_y = -(child_bone.pos[1] - parent_bone.handle.pos[1])\n\n new_pos = child_bone.handle.pos[0] + del_x, child_bone.handle.pos[1] + del_y\n\n child_bone.pos = parent_bone.handle.pos\n child_bone.updateHandle(new_pos)\n \"\"\"\n\n child_bone.parenting_code = 0\n parent_bone.parenting_code = 0\n\ndef main():\n \n pg.init()\n clock = pg.time.Clock()\n mainS = pg.display.set_mode(const.SCREEN_SIZE)\n pg.display.set_caption(\"Pivotix\")\n\n bone0 = boneC.Bone(const.DEF_BONE_POS, None, mainS)\n\n main_bone = boneC.ChiefBone(const.DEF_CBONE_POS, mainS)\n\n bone_list = []\n frame_list = []\n\n fgen = framenumGen()\n\n cur_frame = frameC.Frame(mainS, fgen.next(), bone_list)\n\n\n bone_list.append(bone0)\n bone_list.append(main_bone)\n\n cur_pos = pg.mouse.get_pos()\n\n\n univ_parent_no = 0 # number of bones selected for parenting\n parent_mode = False\n\n select_mutex = False # to allow only one bone to be\n # selected at a time (i.e. 
layering)\n\n frame_snap_mode = False # snapping frames\n\n while True:\n \n cur_pos = pg.mouse.get_pos()\n\n for event in pg.event.get():\n \n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n # Toggle parent mode using Q\n\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_q:\n parent_mode = not parent_mode\n\n if event.key == pg.K_a:\n new_bone = boneC.Bone(const.DEF_BONE_POS, None, mainS)\n bone_list.append(new_bone)\n\n if event.key == pg.K_f:\n #frame_snap_mode = not frame_snap_mode\n frame_snap_mode = True\n\n if event.key == pg.K_e:\n cur_frame = utils.getNextFrame(cur_frame, frame_list)\n\n if event.key == pg.K_w:\n cur_frame = utils.getPrevFrame(cur_frame, frame_list)\n\n if event.type == pg.MOUSEBUTTONDOWN:\n \n for each in bone_list:\n if each.handle.rect.collidepoint(cur_pos):\n \n if not select_mutex:\n select_mutex = True\n each.grabbed = True\n\n if each.type == const.TYPE_CHIEF_BONE:\n \n if not select_mutex and each.translator.rect.collidepoint(cur_pos):\n each.trans_grabbed = True\n\n select_mutex = True\n\n # for parenting\n if parent_mode:\n \n if univ_parent_no == 0:\n\n if each.pos_rect.collidepoint(cur_pos):\n \n univ_parent_no = 1\n each.parenting_code = univ_parent_no\n\n elif univ_parent_no == 1:\n \n\n if each.type == const.TYPE_CHIEF_BONE:\n \n if each.translator.rect.collidepoint(cur_pos):\n each.SP_SEL = True\n univ_parent_no = 2\n each.parenting_code = univ_parent_no\n\n if each.handle.rect.collidepoint(cur_pos):\n univ_parent_no = 2\n each.parenting_code = univ_parent_no\n\n if event.type == pg.MOUSEBUTTONUP:\n \n for each in bone_list:\n if each.grabbed:\n each.grabbed = False\n select_mutex = False\n\n if each.type == const.TYPE_CHIEF_BONE and each.trans_grabbed:\n each.trans_grabbed = False\n select_mutex = False\n\n\n mainS.fill(const.COL_BLUE)\n\n # Load the bones from the current frame\n\n if cur_frame:\n bone_list = cur_frame.bone_list\n\n # Rotate the bones, if any are grabbed\n # Also, draw the bones\n for bone in bone_list:\n \n if bone.grabbed:\n bone.rotate(cur_pos)\n\n\n if bone.type == const.TYPE_CHIEF_BONE and bone.trans_grabbed:\n bone.translate(cur_pos)\n\n bone.draw()\n\n\n # Draw the bone handles on top of all others\n if not frame_snap_mode:\n for bone in bone_list:\n bone.handle.draw()\n\n # Draw the translator on top of everything else\n for bone in bone_list:\n if bone.type == const.TYPE_CHIEF_BONE:\n bone.translator.draw()\n\n # reset all parenting codes if parent_mode is toggled off\n if not parent_mode:\n for bone in bone_list:\n bone.parenting_code = 0\n\n if parent_mode and univ_parent_no == 2:\n child_b = parent_b = None # either may stay None, e.g. if the same bone was clicked for both roles\n for bone in bone_list:\n \n if bone.parenting_code == 1:\n \n child_b = bone\n\n if bone.parenting_code == 2:\n \n parent_b = bone\n\n # if the child bone already has a parent, or either role is unassigned, cancel\n \n if child_b and parent_b and not child_b.parent:\n parent(parent_b, child_b)\n\n univ_parent_no = 0\n parent_mode = False\n\n\n \n\n pg.display.update()\n\n if frame_snap_mode:\n new_f = frameC.Frame(mainS, fgen.next(), bone_list)\n frame_list.append(new_f)\n #pg.image.save(mainS, \"frame.jpeg\")\n frame_snap_mode = False\n cur_frame = new_f\n\n\n clock.tick(const.FPS)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "two-d_animsoft/bkup/main_b.py", "file_name": "main_b.py", "file_ext": "py", "file_size_in_byte": 6602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "utils.snapToParentTranslator", 
"line_number": 23, "usage_type": "call"}, {"api_name": "utils.snapToParent", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 45, "usage_type": "attribute"}, {"api_name": "constants.SCREEN_SIZE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": "boneC.Bone", "line_number": 48, "usage_type": "call"}, {"api_name": "constants.DEF_BONE_POS", "line_number": 48, "usage_type": "attribute"}, {"api_name": "boneC.ChiefBone", "line_number": 50, "usage_type": "call"}, {"api_name": "constants.DEF_CBONE_POS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "frameC.Frame", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.mouse.get_pos", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 90, "usage_type": "attribute"}, {"api_name": "boneC.Bone", "line_number": 91, "usage_type": "call"}, {"api_name": "constants.DEF_BONE_POS", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.K_f", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.K_e", "line_number": 98, "usage_type": "attribute"}, {"api_name": "utils.getNextFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.K_w", "line_number": 101, "usage_type": "attribute"}, {"api_name": "utils.getPrevFrame", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 104, "usage_type": "attribute"}, {"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 144, "usage_type": "attribute"}, {"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "constants.COL_BLUE", "line_number": 156, "usage_type": "attribute"}, {"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 171, "usage_type": "attribute"}, {"api_name": "constants.TYPE_CHIEF_BONE", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 215, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 215, "usage_type": "attribute"}, {"api_name": "frameC.Frame", "line_number": 218, "usage_type": "call"}, {"api_name": "constants.FPS", "line_number": 225, 
"usage_type": "attribute"}]} +{"seq_id": "90424055", "text": "from shapely.ops import cascaded_union, polygonize\nfrom scipy.spatial import Delaunay\nimport numpy as np\nimport math\nimport shapely.geometry as geometry\nfrom matplotlib.path import Path\nfrom shapely.geometry import Point\nfrom shapely.affinity import rotate\nimport os\n\ndef strided_indexing_roll(a, r):\n # https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently\n # Concatenate with sliced to cover all rolls\n a_ext = np.concatenate((a,a[:,:-1]),axis=1)\n\n # Get sliding windows; use advanced-indexing to select appropriate ones\n n = a.shape[1]\n return viewW(a_ext,(1,n))[np.arange(len(r)), (n-r)%n,0]\n\ndef safe_mkdir(path):\n \"\"\" \n Create a directory if there isn't one already. \n \"\"\"\n try:\n os.mkdir(path)\n except OSError:\n pass\n\ndef alpha_shape(points, alpha):\n \"\"\"\n Compute the alpha shape (concave hull) of a set of points.\n\n @param points: Iterable container of points.\n @param alpha: alpha value to influence the gooeyness of the border. Smaller\n numbers don't fall inward as much as larger numbers. Too large,\n and you lose everything.\n \"\"\"\n if len(points) < 4:\n # When you have a triangle, there is no sense in computing an alpha\n # shape.\n return geometry.MultiPoint(list(points)).convex_hull\n\n def add_edge(edges, edge_points, coords, i, j):\n \"\"\"Add a line between the i-th and j-th points, if not in the list already\"\"\"\n if (i, j) in edges or (j, i) in edges:\n # already added\n return\n edges.add( (i, j) )\n edge_points.append(coords[ [i, j] ])\n\n coords = np.array([point.coords[0] for point in points])\n\n tri = Delaunay(coords)\n edges = set()\n edge_points = []\n # loop over triangles:\n # ia, ib, ic = indices of corner points of the triangle\n for ia, ib, ic in tri.vertices:\n pa = coords[ia]\n pb = coords[ib]\n pc = coords[ic]\n\n # Lengths of sides of triangle\n a = math.sqrt((pa[0]-pb[0])**2 + (pa[1]-pb[1])**2)\n b = math.sqrt((pb[0]-pc[0])**2 + (pb[1]-pc[1])**2)\n c = math.sqrt((pc[0]-pa[0])**2 + (pc[1]-pa[1])**2)\n\n # Semiperimeter of triangle\n s = (a + b + c)/2.0\n\n # Area of triangle by Heron's formula\n area = math.sqrt(s*(s-a)*(s-b)*(s-c))\n circum_r = a*b*c/(4.0*area)\n\n # Here's the radius filter.\n #print circum_r\n if circum_r < 1.0/alpha:\n add_edge(edges, edge_points, coords, ia, ib)\n add_edge(edges, edge_points, coords, ib, ic)\n add_edge(edges, edge_points, coords, ic, ia)\n\n m = geometry.MultiLineString(edge_points)\n triangles = list(polygonize(m))\n return cascaded_union(triangles), edge_points\n\ndef generate_one_d_image(polyg, nx=64,ny=64, shift_n = 8):\n \"\"\" \n Takes a two-dimentional polygon as its input,\n and produces the polygons one-dimentional density \n by taking integral along the x-axis. 
\"\"\"\n poly_verts = list(np.array(polyg.exterior.xy).T)\n\n # Create vertex coordinates for each grid cell...\n # (<0,0> is at the top left of the grid in this system)\n x, y = np.meshgrid(np.arange(nx), np.arange(ny))\n x, y = x.flatten(), y.flatten()\n\n points = np.vstack((x,y)).T\n\n path = Path(poly_verts)\n grid = path.contains_points(points)\n grid = grid.reshape((ny,nx))\n \n oneDImage = grid.sum(axis=0).astype('uint8')\n \n # random image translations\n if(shift_n > 0):\n temp_1d_np = np.zeros(nx+shift_n*2)\n col_start = np.random.randint(-shift_n, shift_n+1) # [low,high)\n temp_1d_np[8+col_start:8+col_start+64] = oneDImage\n oneDImage = np.copy(temp_1d_np[8:8+64])\n \n return(oneDImage)\n \ndef two_d_image(polyg, nx0=64,nx1=64,o_nx0=48, o_nx1=48):\n \"\"\" \n Takes a two-dimentional polygon as its input,\n and produces 2d boolean mapping.\n \"\"\"\n poly_verts = list(np.array(polyg.exterior.xy).T)\n\n # Create vertex coordinates for each grid cell...\n # (<0,0> is at the top left of the grid in this system)\n x, y = np.meshgrid(np.arange(nx0), np.arange(nx1))\n x, y = x.flatten(), y.flatten()\n\n points = np.vstack((x,y)).T\n\n path = Path(poly_verts)\n grid = path.contains_points(points)\n grid = grid.reshape((nx1,nx0))\n \n # finds the the minimum x_0 and x_1 values of the object. \n # lower limit exists to keep the dimension equal to o_nx\n temp= grid.argmax(axis=0)\n min_x0 = min(temp[temp>0].min(),nx0-o_nx0)\n temp= grid.argmax(axis=1)\n min_x1 = min(temp[temp>0].min(),nx1-o_nx1)\n # returns an array sized (o_nx0, o_nx1), where we crop \n # out the empty rows at the top, and the empty columns\n # on the left of the array. This is done to reduce the\n # search space for calculating the orbit loss.\n return(grid[min_x0:min_x0+o_nx0, min_x1:min_x1+o_nx1].astype('uint8'))\n \ndef gen_rand_poly_images(n = 1000,img_size = 64,outp_size=48, n_point = 60, alpha = .2):\n \"\"\"\n Generates a polygon by computing a randomly generated two\n dimentional concave hull. 
The function returns a sample of\n the polygon's one-dimensional images, and their degrees of \n rotation.\n \"\"\"\n #randomizes the seeds in each worker process\n np.random.seed()\n \n #generate objects at the center of the image\n a = np.random.uniform(img_size // 4, (img_size * 3) // 4, size=(n_point,2))\n points = [Point(a[i]) for i in range(n_point)]\n concave_hull, edge_points = alpha_shape(points, alpha=alpha)\n \n #if the result is a multipolygon, select the first polygon\n t_poly = concave_hull.buffer(1)\n if(type(t_poly) == geometry.multipolygon.MultiPolygon):\n t_poly = t_poly[0]\n \n rotation_angles = np.hstack((np.array([0.],dtype='float16'), np.random.uniform(0,360, size = n-1).astype('float16')))\n rotated_polygons = [rotate(t_poly,k) for k in rotation_angles]\n one_d_images = np.array([generate_one_d_image(rotated_polygons[k], nx=img_size,ny=img_size) for k in range(n)])\n #two_d_img = two_d_image(t_poly, nx=img_size,ny=img_size)\n two_d_images = np.array([two_d_image(p,nx0=img_size\n ,nx1=img_size\n ,o_nx0=outp_size\n ,o_nx1=outp_size )\n for p in [rotate(t_poly,k) for k in range(360)]])\n\n return(rotation_angles, one_d_images, two_d_images)\n", "sub_path": "src.py", "file_name": "src.py", "file_ext": "py", "file_size_in_byte": 6463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.concatenate", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 25, "usage_type": "call"}, {"api_name": "shapely.geometry.MultiPoint", "line_number": 41, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.spatial.Delaunay", "line_number": 53, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 72, "usage_type": "call"}, {"api_name": "shapely.geometry.MultiLineString", "line_number": 82, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 82, "usage_type": "name"}, {"api_name": "shapely.ops.polygonize", "line_number": 83, "usage_type": "call"}, {"api_name": "shapely.ops.cascaded_union", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.path.Path", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.path.Path", "line_number": 129, "usage_type": "call"}, 
{"api_name": "numpy.random.seed", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 156, "usage_type": "attribute"}, {"api_name": "shapely.geometry.Point", "line_number": 157, "usage_type": "call"}, {"api_name": "shapely.geometry.multipolygon", "line_number": 162, "usage_type": "attribute"}, {"api_name": "shapely.geometry", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "shapely.affinity.rotate", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "shapely.affinity.rotate", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "636261401", "text": "\"\"\" fundamental_analysis/yield_curve_model.py tests \"\"\"\nimport unittest\nfrom datetime import datetime\nfrom io import StringIO\nfrom unittest import mock\n\nimport pandas as pd\n\nfrom gamestonk_terminal.stocks.fundamental_analysis.yield_curve_model import (\n get_yield_curve,\n get_yield_curve_year,\n)\n\n# pylint: disable=unused-import\nfrom gamestonk_terminal.test_helper import ( # noqa: F401\n parameterize_from_file,\n pytest_generate_tests,\n)\n\nassertions = unittest.TestCase(\"__init__\")\n\n\nclass TestFaYieldCurveModel:\n @mock.patch(\n \"gamestonk_terminal.stocks.fundamental_analysis.yield_curve_model.get_yield_curve_year\"\n )\n @parameterize_from_file(\n \"test_get_yield_curve\",\n \"../tests/data/fa_yield_curve_model.yaml\",\n )\n # pylint: disable=too-many-arguments\n def test_get_yield_curve(\n self,\n mock_get_yield_curve_year,\n start,\n end,\n mock_get_yield_curve_year_rets,\n expected_result,\n ):\n\n rets = []\n for a_ret in mock_get_yield_curve_year_rets:\n rets.append(\n pd.read_csv(StringIO(a_ret), header=0, index_col=0, parse_dates=True)\n )\n\n mock_get_yield_curve_year.side_effect = rets\n df = get_yield_curve(\n datetime.strptime(start, \"%m/%d/%y\"),\n datetime.strptime(end, \"%m/%d/%y\"),\n )\n\n assertions.assertEqual(\n df.to_csv().replace(\"\\r\\n\", \"\\n\"), expected_result.replace(\"\\r\\n\", \"\\n\")\n )\n\n @mock.patch(\n \"gamestonk_terminal.stocks.fundamental_analysis.yield_curve_model.requests\"\n )\n @parameterize_from_file(\n \"test_get_yield_curve_year\",\n \"../tests/data/fa_yield_curve_model.yaml\",\n )\n def test_get_yield_curve_year(\n self, mock_request_get, year, mock_yield_curve_page, expected_result\n ):\n mock_request_get.get().text = mock_yield_curve_page\n\n ret = get_yield_curve_year(year)\n\n assertions.assertEqual(\n ret.to_csv().replace(\"\\r\\n\", \"\\n\"), expected_result.replace(\"\\r\\n\", \"\\n\")\n )\n", "sub_path": "tests/test_fa/test_fa_yield_curve_model.py", "file_name": "test_fa_yield_curve_model.py", "file_ext": "py", "file_size_in_byte": 2081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "io.StringIO", 
"line_number": 44, "usage_type": "call"}, {"api_name": "gamestonk_terminal.stocks.fundamental_analysis.yield_curve_model.get_yield_curve", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 24, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 24, "usage_type": "name"}, {"api_name": "gamestonk_terminal.test_helper.parameterize_from_file", "line_number": 27, "usage_type": "call"}, {"api_name": "gamestonk_terminal.stocks.fundamental_analysis.yield_curve_model.get_yield_curve_year", "line_number": 69, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 57, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 57, "usage_type": "name"}, {"api_name": "gamestonk_terminal.test_helper.parameterize_from_file", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "14046419", "text": "# -*- encoding utf-8 -*-\nimport datetime\nimport cx_Oracle\nfrom DataException import DataException\n\n\nclass MetaDataService:\n \"\"\"\n Service to process several database operations on the\n meta data, request table and content table\n \"\"\"\n\n def __init__(self, connection, archive_connection, db_config, arch_config):\n \"\"\"\n Creates a new MetaDataService instance\n\n :param connection: A connection object (cx_Oracle connection object)\n :param archive_connection: A connection object for the archive db (cx_Oracle connection object)\n :param db_config: Database configuration values\n :param arch_config: Archive database configuration values\n :return: New MetaDataService\n \"\"\"\n self.con = connection\n self.a_con = archive_connection\n self.db_config = db_config\n self.arch_config = arch_config\n\n def set_console_logger(self, console_logger):\n \"\"\"\n Set the console logger\n\n :param console_logger: logger instance\n \"\"\"\n self.__c_logger = console_logger\n\n def set_file_logger(self, file_logger):\n \"\"\"\n Set the file logger\n\n :param file_logger: logger instance\n \"\"\"\n self.__f_logger = file_logger\n\n def find_all_requests(self):\n \"\"\"\n Queries all dataset names by the names in the ArcGIS requests table\n\n :return: Names (List)\n :exception: DataException\n \"\"\"\n cur = None\n try:\n query = \"SELECT r.NAME_OF_DATASET FROM SDE.\" + self.db_config[\"request_table\"] + \" r\"\n\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None)\n cur.arraysize = 100\n result = cur.fetchall()\n list = []\n for r in result:\n list.append(r[0])\n return list\n except cx_Oracle.DatabaseError as e:\n self.__c_logger.exception(\"EXCEPTION WHILE finding meta data: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE finding meta data: \" + str(e))\n raise DataException(\"Error while fetching all data sets: \" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def meta_data_exists(self, dataset_name):\n \"\"\"\n Checks the existence of meta data by a given dataset name\n\n :param name: The dataset name (String)\n\n :return: (Boolean)\n :exception: DataException\n \"\"\"\n\n self.__c_logger.info(\"Check if in DB: \" + str(dataset_name))\n self.__f_logger.info(\"Check if in DB: \" + str(dataset_name))\n cur = None\n try:\n query = \"SELECT i.NAME \" \\\n \"FROM 
SDE.GDB_ITEMS_VW i LEFT JOIN SDE.GDB_ITEMTYPES t \" \\\n \"ON i.Type = t.UUID \" \\\n \"WHERE i.NAME = :data_name \" \\\n \"AND t.NAME IN ('Feature Dataset', 'Raster Dataset', 'Table', 'Raster Catalog', 'Mosaic Dataset') \" \\\n \"AND length(i.DOCUMENTATION) > 1 \" \\\n \"AND i.DOCUMENTATION IS NOT NULL \"\n\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None, {\"data_name\": str(dataset_name)})\n result = cur.fetchall()\n if len(result) > 0:\n return True\n return False\n except Exception as e:\n self.__c_logger.exception(\"EXCEPTION WHILE checking the existence of meta data: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE checking the existence of meta data: \" + str(e))\n raise DataException(\"Error while fetching all datasets: \" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def find_meta_data_by_dataset_names(self):\n \"\"\"\n Queries all XML meta data CLOBS by the names in the ArcGIS REQUEST table\n\n :return: Meta data (Dictionary)\n :exception: DataException\n \"\"\"\n self.__c_logger.info(\"Find all meta data for all data set names\")\n self.__f_logger.info(\"Find all meta data for all data set names\")\n cur = None\n try:\n query = \"SELECT i.NAME, t.NAME, i.DOCUMENTATION \" \\\n \"FROM SDE.GDB_ITEMS_VW i LEFT JOIN SDE.GDB_ITEMTYPES t \" \\\n \"ON i.Type = t.UUID \" \\\n \"WHERE i.NAME IN (SELECT r.NAME_OF_DATASET FROM SDE.\" + self.db_config[\"request_table\"] + \" r)\" \\\n \"AND t.NAME IN ('Feature Dataset', 'Raster Dataset', 'Table', 'Raster Catalog', 'Mosaic Dataset') \" \\\n \"AND length(i.DOCUMENTATION) > 1 \" \\\n \"AND i.DOCUMENTATION IS NOT NULL \"\n\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None)\n cur.arraysize = 100\n result = cur.fetchall()\n metas = {}\n for r in result:\n self.__c_logger.info(\"DATASET FOUND ==> \" + str(r))\n metas[r[0]] = r[2].read()\n return metas\n except cx_Oracle.DatabaseError as e:\n self.__c_logger.exception(\"Error while fetching all data sets: \" + str(e))\n self.__f_logger.exception(\"Error while fetching all data sets: \" + str(e))\n raise DataException(\"Error while fetching all datasets: \" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def find_max_id(self):\n \"\"\"\n Queries the maximum id value in the content table. 
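(The returned value is the maximum plus one; add_process uses it as the OBJECTID of the row it inserts.)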
If no entries are\n available the id will be set to 1\n\n :return: ID (Integer)\n :exception: DataException\n \"\"\"\n self.__c_logger.info(\"Find max id in content table\")\n self.__f_logger.info(\"Find max id in content table\")\n cur = None\n dataset_id = -1\n try:\n getId = \"SELECT MAX(c.OBJECTID) FROM SDE.\" + self.db_config[\"content_table\"] + \" c\"\n cur = self.con.cursor()\n cur.prepare(getId)\n cur.execute(None)\n result = cur.fetchall()\n if len(result) > 0:\n for r in result:\n dataset_id = r[0]\n break\n self.__c_logger.debug(\"MAX dataset ID in the content table = \" + str(dataset_id))\n if dataset_id is None:\n self.__c_logger.debug(\"No entries in content table -> Set id to 0: ID = \" + str(dataset_id))\n dataset_id = 0\n return (dataset_id + 1)\n\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE finding max id in content table: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE finding max id in content table: \" + str(e))\n raise DataException(\"Exception while fetching max id in \" +\n \"SDE.ARCHIVE_ORDERS_EVW: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def find_id_by_name(self, dataset_name): # returns the request-table OBJECTID for a dataset name, or -1 if not found\n self.__c_logger.info(\"Find id by name\")\n self.__f_logger.info(\"Find id by name\")\n cur = None\n dataset_id = -1\n try:\n getId = \"SELECT r.OBJECTID FROM SDE.\" + self.db_config[\"request_table\"] + \" r WHERE r.NAME_OF_DATASET = :data_name\"\n cur = self.con.cursor()\n cur.prepare(getId)\n cur.execute(None, {\"data_name\": dataset_name})\n result = cur.fetchall()\n if len(result) > 0:\n for r in result:\n dataset_id = r[0]\n break\n\n return dataset_id\n\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE finding id by name: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE finding id by name: \" + str(e))\n raise DataException(\"Exception while fetching id for \"\n + dataset_name\n + \" from SDE.ARCHIVE_ORDERS_EVW: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def add_process(self, dataset_name, remarks, org_name):\n \"\"\"\n Add a new process entry to the content table\n\n :param dataset_name: Name of the archived dataset (String)\n :param remarks: Notes about the current process state or failure (String)\n :param org_name: The original dataset name (String)\n :return: ID of the entry (Integer)\n :exception: DataException\n \"\"\"\n self.__c_logger.info(\"Add process information to the content table\")\n self.__f_logger.info(\"Add process information to the content table\")\n cur = None\n did = self.find_max_id()\n\n try:\n query = \"INSERT INTO \" \\\n \"SDE.\" + self.db_config[\"content_table\"] + \\\n \" (OBJECTID, NAME_OF_DATASET, DATE_OF_ARCHIVING, REMARKS, NAME_OF_DATASET_ORIGINAL) \" \\\n \"VALUES (:data_id, :data_name, :req_date, :remarks, :org)\"\n cur = self.con.cursor()\n cur_date = datetime.datetime.now() #datetime.date.today()\n cur.prepare(query)\n cur.execute(None, {'data_id': did, 'data_name': dataset_name, 'req_date': cur_date, 'remarks': remarks,\n 'org': org_name})\n self.con.commit()\n return did\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE adding process information to the content table: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE adding process information to the content table: \" + str(e))\n raise DataException(\"Exception while adding a process to SDE.ARCHIVE_CONTENT_EVW: \\n\" + str(e))\n 
finally:\n if cur is not None:\n cur.close()\n\n def update_state(self, data_id, state):\n \"\"\"\n Set the state of a row in the content table. The row is found by dataset ID\n\n :param data_id: Id of the related data set (Integer)\n :param state: The new value of the state column (String)\n :exception: DataException\n \"\"\"\n\n self.__c_logger.info(\"Update process information to the content table\")\n self.__f_logger.info(\"Update process information to the content table\")\n cur = None\n try:\n query = \"UPDATE SDE.\" + self.db_config[\"content_table\"] +\\\n \" c SET c.REMARKS = :state WHERE c.OBJECTID = :data_id\"\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None, {'state': state, 'data_id': data_id})\n self.con.commit()\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE updating process information to the content table: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE updating process information to the content table: \" + str(e))\n raise DataException(\"Exception while updating the state column of SDE.ARCHIVE_CONTENT_EVW: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def update_name(self, data_id, name):\n \"\"\"\n Set the name of a row in the content table. The row is found by dataset ID\n\n :param data_id: Id of the related dataset (Integer)\n :param name: The new value of the state column (String)\n :exception: DataException\n \"\"\"\n\n self.__c_logger.info(\"Update process information (name) to the content table\")\n self.__f_logger.info(\"Update process information (name) to the content table\")\n cur = None\n try:\n query = \"UPDATE SDE.\" + self.db_config[\"content_table\"] +\\\n \" c SET c.NAME_OF_DATASET = :name WHERE c.OBJECTID = :data_id\"\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None, {'name': name, 'data_id': data_id})\n self.con.commit()\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE updating process information (name) to the content table: \"\n + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE updating process information (name) to the content table: \"\n + str(e))\n raise DataException(\"Exception while updating the name column of SDE.ARCHIVE_CONTENT_EVW: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def delete_by_id(self, data_id):\n \"\"\"\n Delete a row of the request table by id\n\n :param data_id: Dataset ID (Integer)\n \"\"\"\n self.__c_logger.info(\"Delete request from the request table\")\n self.__f_logger.info(\"Delete request from the request table\")\n\n cur = None\n try:\n query = \"DELETE FROM SDE.\" + self.db_config[\"request_table\"] + \" WHERE OBJECTID = :data_id\"\n cur = self.con.cursor()\n cur.prepare(query)\n cur.execute(None, {'data_id': data_id})\n self.con.commit()\n\n except cx_Oracle.DatabaseError as e:\n self.con.rollback()\n self.__c_logger.exception(\"EXCEPTION WHILE deleting request from the request table: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE deleting request from the request table: \" + str(e))\n raise DataException(\"Exception while deleting the id \" +\n str(data_id) +\n \" column of SDE.ARCHIVE_ORDERS_EVW: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def add_meta_data(self, meta_data, arch_title):\n \"\"\"\n Add required and optional meta data to the database\n\n :param meta_data: Meta data object containing validated meta data (MetaData)\n :param arch_title: The title of the data 
set after it was copied to the archive\n :exception: DataException\n \"\"\"\n self.__c_logger.info(\"Insert meta data into db\")\n self.__f_logger.info(\"Insert meta data into db\")\n cur = None\n\n try:\n query = \"INSERT INTO \" \\\n \"SDE.\" + self.arch_config['meta_data_table'] + \" (archive_title, title, topic, description, \" \\\n \"contact_name, contact_organisation, contact_position, contact_role, creation_date, content_lang, \" \\\n \"bounding_box_west, bounding_box_east, bounding_box_north, bounding_box_south, \" \\\n \"spatial_representation_type, spatial_reference_version, spatial_reference_space, \" \\\n \"spatial_reference_code, maintenance_update_frequency, maintenance_note) \" \\\n \"VALUES (:arch_title, :title, :topic, :description, :contact_name, :org, \" \\\n \":pos, :role, :create_date, :lang, :west, :east, :north, :south, \" \\\n \":sr_type, :sr_version, :sr_space, :sr_code, :m_freq, :m_note)\"\n cur = self.a_con.cursor()\n cur.prepare(query)\n cur.execute(None, {\n 'arch_title' : str(arch_title),\n 'title': self.__check_meta_data_value(meta_data, 'title'),\n 'topic': self.__check_meta_data_value(meta_data, 'topic'),\n 'description': self.__check_meta_data_value(meta_data, 'description'),\n 'contact_name': self.__check_meta_data_value(meta_data, 'contact_name'),\n 'pos': self.__check_meta_data_value(meta_data, 'contact_position'),\n 'org': self.__check_meta_data_value(meta_data, 'contact_organisation'),\n 'role': self.__check_meta_data_value(meta_data, 'contact_role'),\n 'lang': self.__check_meta_data_value(meta_data, 'content_lang'),\n 'east': self.__check_meta_data_value(meta_data, 'bounding_box_east'),\n 'west': self.__check_meta_data_value(meta_data, 'bounding_box_west'),\n 'north': self.__check_meta_data_value(meta_data, 'bounding_box_north'),\n 'south': self.__check_meta_data_value(meta_data, 'bounding_box_south'),\n 'create_date': self.__check_meta_data_value(meta_data, 'creation_date'),\n 'sr_type': self.__check_meta_data_value(meta_data, 'spatial_representation_type'),\n 'sr_version': self.__check_meta_data_value(meta_data, 'spatial_reference_version'),\n 'sr_space': self.__check_meta_data_value(meta_data, 'spatial_reference_space'),\n 'sr_code': self.__check_meta_data_value(meta_data, 'spatial_reference_code'),\n 'm_freq': self.__check_meta_data_value(meta_data, 'maintenance_frequency'),\n 'm_note': self.__check_meta_data_value(meta_data, 'maintenance_note')\n })\n self.a_con.commit()\n except Exception as e:\n self.__c_logger.exception(\"EXCEPTION while inserting meta data into db: \" + str(e))\n self.__f_logger.exception(\"EXCEPTION while inserting meta data into db: \" + str(e))\n try:\n self.a_con.rollback() # roll back on the archive connection the insert ran on\n except Exception as e:\n raise DataException(\"Exception while inserting meta data into db: \\n\" + str(e))\n raise DataException(\"Exception while inserting meta data into db: \\n\" + str(e))\n finally:\n if cur is not None:\n cur.close()\n\n def __check_meta_data_value(self, meta_data, value):\n result_value = None\n if str(value) not in meta_data.meta_data():\n for key in meta_data.meta_data():\n if str(value) == (str(key).split(\"$\"))[0]:\n value = str(key)\n try:\n result_value = (meta_data.meta_data())[value]\n result_value = (str(result_value).split(\"$\"))[0]\n except Exception as e:\n self.__c_logger.exception(\"EXCEPTION WHILE setting a meta data value: \" + str(value) + \": \" + str(e))\n self.__f_logger.exception(\"EXCEPTION WHILE setting a meta data value: \" + str(value) + \": \" + str(e))\n\n return result_value\n", "sub_path": 
"MetaDataService.py", "file_name": "MetaDataService.py", "file_ext": "py", "file_size_in_byte": 18309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cx_Oracle.DatabaseError", "line_number": 64, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 67, "usage_type": "call"}, {"api_name": "DataException.DataException", "line_number": 104, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 138, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 141, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 174, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 178, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 202, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "attribute"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 240, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 244, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 268, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 272, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 296, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 302, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 324, "usage_type": "attribute"}, {"api_name": "DataException.DataException", "line_number": 328, "usage_type": "call"}, {"api_name": "DataException.DataException", "line_number": 388, "usage_type": "call"}, {"api_name": "DataException.DataException", "line_number": 389, "usage_type": "call"}]} +{"seq_id": "367962706", "text": "from __future__ import print_function\nimport gensim.downloader as api # package to download text corpus\nimport nltk # text processing\nfrom nltk.corpus import stopwords\nimport string\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.optim import SGD\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, TensorDataset\n\nimport csv\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\n\n## Pick which model from part A to use \noptions = ['cbow','skipgram','lstm']\nselect = options[0]\n\nwith open('{}_w2i.pickle'.format(select), 'rb') as handle:\n org_word_to_idx = pickle.load(handle)\nwith open('{}_i2w.pickle'.format(select), 'rb') as handle:\n org_idx_to_word = pickle.load(handle)\n\n\n# collect all words to be removed\npunctuations = list(string.punctuation)\nstop = stopwords.words('english') + list(string.punctuation)\n\ndef make_vector(sentence,word_to_idx):\n sentence_vector = []\n for word in sentence.split():\n if word in word_to_idx.keys():\n sentence_vector.append(word_to_idx[word])\n else:\n sentence_vector.append(0)\n return sentence_vector\n\ndef remove_punctuations(sentence, punctuations):\n alts = ['\\/','-']\n for alt in alts:\n if alt in sentence:\n sentence = sentence.replace(alt,' ')\n final = \"\".join(u for u in sentence if u not in punctuations)\n return final\n\ndef pad_features(reviews_int, seq_length):\n ''' Return features of review_ints, 
where each review is padded with 0's or truncated to the input seq_length.\n '''\n features = np.zeros((len(reviews_int), seq_length), dtype = int)\n \n for i, review in enumerate(reviews_int):\n review_len = len(review)\n \n if review_len <= seq_length:\n zeroes = list(np.zeros(seq_length-review_len))\n new = zeroes+review \n elif review_len > seq_length:\n new = review[0:seq_length]\n \n features[i,:] = np.array(new)\n \n return torch.tensor(features, dtype=torch.long)\n\ndef make_vocab(filename,train):\n tsv_file = open(filename)\n\n read_tsv = csv.reader(tsv_file, delimiter=\"\\t\")\n headers = next(read_tsv, None)\n\n unique_words = set()\n sentences = []\n sentiments = []\n for i,row in enumerate(read_tsv):\n sentence, sentiment = row[2],row[3]\n # remove punctuation\n sentence = remove_punctuations(sentence, punctuations).lower().split()\n # remove stop words\n sentence = \" \".join([w for w in sentence if w not in stop])\n\n words = sentence.split()\n # remove very small and very large reviews\n if (len(words)>0 and len(words)<15) or not train:\n for word in words:\n unique_words.add(word)\n sentences.append(sentence)\n sentiments.append(int(sentiment))\n return list(unique_words),(sentences,sentiments)\n\ndef make_dataset(sentences, sentiments, word_to_idx, SEQ_LENGTH = 8, BATCH_SIZE = 50):\n\n train_x = [make_vector(sentence, word_to_idx) for sentence in sentences]\n ## pad to const seq_length\n train_x = pad_features(train_x,SEQ_LENGTH).cuda()\n train_y = torch.tensor(sentiments, dtype=torch.long).cuda()\n\n # create datasets\n train_data = TensorDataset(train_x, train_y)\n train_loader = DataLoader(train_data, shuffle=True, batch_size=BATCH_SIZE, drop_last=True)\n return train_loader\n\nBATCH_SIZE = 2048\nSEQ_LENGTH = 8\nEMBEDDING_SIZE = 300\n\n## Train\nvocab,(sentences,sentiments) = make_vocab(\"Data/train.csv\",train=True)\nword_to_idx = {w: i+1 for i, w in enumerate(vocab)}\nidx_to_word = {ix+1:word for ix, word in enumerate(vocab)}\nprint(\"Vocab size\", len(vocab))\n\ntrain_loader = make_dataset(sentences, sentiments, word_to_idx, SEQ_LENGTH, BATCH_SIZE)\n\n## Validation\n_,(val_sentences,val_sentiments) = make_vocab(\"Data/val.csv\",train=False)\nval_loader = make_dataset(val_sentences, val_sentiments, word_to_idx, SEQ_LENGTH, BATCH_SIZE)\n\n\n\nmatrix_len = len(vocab) + 1\nweights_matrix = np.zeros((matrix_len, EMBEDDING_SIZE))\nwords_found = 0\n\nembeddings_loaded = np.load('./Embeddings/{}_embeddings.npz'.format(select))['name1']\n\nfor word,i in word_to_idx.items():\n if word in org_word_to_idx.keys():\n words_found += 1\n weights_matrix[i] = embeddings_loaded[org_word_to_idx[word],:]\n else:\n weights_matrix[i] = np.random.normal(scale=0.6, size=(EMBEDDING_SIZE, ))\n\nweights_matrix[0] = np.random.normal(scale=0.6, size=(EMBEDDING_SIZE, ))\nweights_matrix = torch.from_numpy(weights_matrix).float()\nprint(\"Found {} out of {} words\".format(words_found,len(vocab)))\n\nclass SentimentLSTM(nn.Module):\n \n def __init__(self,corpus_size,output_size,embedd_dim,hidden_dim,n_layers):\n super().__init__()\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n \n self.embedding = nn.Embedding.from_pretrained(weights_matrix)\n self.lstm = nn.LSTM(embedd_dim, hidden_dim,n_layers,dropout=0.5, batch_first=True)\n self.dropout = nn.Dropout(0.3)\n self.fc = nn.Linear(hidden_dim,output_size)\n self.act = nn.Sigmoid()\n \n def forward(self,x,hidden):\n batch_size = x.size(0)\n embeds = self.embedding(x)\n lstm_out, hidden = 
self.lstm(embeds,hidden)\n lstm_out = lstm_out.contiguous().view(-1,self.hidden_dim)\n out = self.dropout(lstm_out)\n out = self.fc(out)\n out = self.act(out)\n out = out.view(batch_size,-1)\n out = out[:,-5:]\n return out, hidden\n\n def init_hidden(self,batch_size):\n weight = next(self.parameters()).data\n hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),\n weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())\n return hidden\n\n# Instantiate the model w/ hyperparams\nvocab_size = len(vocab)+1 # +1 for the 0 padding\noutput_size = 5\nembedding_dim = EMBEDDING_SIZE\nhidden_dim = 256\nn_layers = 2\nnet = SentimentLSTM(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)\nprint(net)\n\nnet.train()\nclip=5\nepochs = 200\nprint_every = 100\nlr=0.01\n\ndef criterion(input, target):\n l = -(target * torch.log(F.softmax(input, dim=1) + 1e-10)).sum(1)\n return l.mean()\n\n## Whether we need to freeze the embedding or not\nfreeze_embeddings = True\nif freeze_embeddings:\n net.embedding.weight.requires_grad = False\noptimizer = torch.optim.Adam([ param for param in net.parameters() if param.requires_grad == True], lr=lr)\n\n# optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n\nnet.cuda()\nlosses = []\ntrain_accs=[]\nval_accs = []\n\nos.makedirs(\"./LSTM2/\",exist_ok = True)\n\nfor e in range(epochs):\n PATH = './LSTM2/model_{}'.format(e)\n # initialize hidden state\n h = net.init_hidden(BATCH_SIZE)\n running_loss = 0.0\n running_acc = 0.0\n # batch loop\n for idx,(inputs, labels) in enumerate(train_loader):\n \n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n inputs, labels = inputs.cuda(), labels.cuda()\n\n # zero accumulated gradients\n optimizer.zero_grad()\n\n # get the output from the model\n output, h = net(inputs.cuda(), h)\n labels=torch.nn.functional.one_hot(labels, num_classes=5).cuda()\n # calculate the loss and perform backprop\n loss = criterion(output, labels)\n loss.backward()\n running_loss += loss.cpu().detach().numpy()\n running_acc += (output.argmax(dim=1) == labels.argmax(dim=1)).float().mean()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n optimizer.step()\n \n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Loss: {:.6f}...\".format((running_loss/(idx+1))),\n \"Train acc: {}\".format((running_acc/(idx+1))) )\n \n losses.append(float(running_loss/(idx+1)))\n train_accs.append(float(running_acc/(idx+1)))\n\n ## do validation\n h = net.init_hidden(BATCH_SIZE) \n running_acc = 0.0\n\n for idx,(inputs, labels) in enumerate(val_loader):\n inputs, labels = inputs.cuda(), labels.cuda()\n # get the output from the model\n output, h = net(inputs.cuda(), h)\n labels = torch.nn.functional.one_hot(labels, num_classes=5).cuda()\n running_acc += (output.argmax(dim=1)==labels.argmax(dim=1)).float().mean()\n print(f'Val acc:{running_acc/(idx+1)}')\n val_accs.append(running_acc/(idx+1))\n \n torch.save(net.state_dict(), PATH)\n\ntrain_acc_all = train_accs\nval_acc_all = val_accs\nplt.figure()\nplt.plot(np.arange(len(train_acc_all)),train_acc_all)\nplt.plot(np.arange(len(train_acc_all)),val_acc_all)\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.yticks(np.arange(0,1.1,0.1))\nplt.savefig('{}.png'.format(\"train_fast_en\"))\nplt.show()\n\n## 
Testing\nnet.eval()\nnet.cuda()\n_,(test_sentences,test_sentiments) = make_vocab(\"Data/test.csv\", train=False)\n\ntest_x = [make_vector(sentence, word_to_idx) for sentence in test_sentences]\ntest_x = pad_features(test_x,SEQ_LENGTH)\ntest_y = torch.tensor(test_sentiments, dtype=torch.long).cuda()\n\nconfusion_matrix = np.zeros((5,5))\nh = net.init_hidden(1)\nacc = 0\nfor x,y in zip(test_x,test_y):\n h = tuple([each.data for each in h])\n x,y = x.unsqueeze(0).cuda(),y.cuda()\n \n # get the output from the model\n output, h = net(x, h)\n labels = torch.nn.functional.one_hot(y, num_classes=5).cuda()\n acc += (output.argmax()==labels.argmax()).float().mean()\n \n confusion_matrix[output.argmax()][labels.argmax()] += 1\n \nprint(\"Total test accuracy: \", acc/len(test_y))\nclass_wise_total = np.sum(confusion_matrix,axis=0)\n\nfor i in range(5):\n print(\"Sentiment - \", i,\" - accuracy - \",(confusion_matrix[i][i]/class_wise_total[i])*100)\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.heatmap(confusion_matrix, annot=True)\n\nplt.title(\"Confusion Matrix\")\nplt.xlabel('True label') \nplt.ylabel('Predicted') \n\nplt.savefig('{}.png'.format(\"cm_fast_en\"))\nplt.show()", "sub_path": "Assignment3/Code/Part_B/q2_other_lstm.py", "file_name": "q2_other_lstm.py", "file_ext": "py", "file_size_in_byte": 10253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pickle.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 27, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 31, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 32, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 32, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 67, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 141, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.Embedding.from_pretrained", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.nn", 
"line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 196, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 226, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 234, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.long", 
"line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 290, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 296, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 309, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}]} +{"seq_id": "398520424", "text": "import numpy\nimport matplotlib.pyplot as plt\n\n#probability to move up or down\nprob = [ 0.05 , 0.95 ]\n\n#defining starting position\nstart = 3\nposition = [start]\n\n#generating random points\nr = numpy.random.random(1000)\ndown_p = r < prob[0]\nup_p = r > prob[1]\n\n#movement\n#using zip to combine the randomly generated sequences\nfor idown_p, iup_p in zip(down_p, up_p):\n down = idown_p and position[-1] > 1\n up = iup_p and position[-1] < 4\n position.append(position[-1] - down + up)\n\n#plotting the walk\nplt.plot(position)\nplt.show()\n", "sub_path": "Random walk 1D.py", "file_name": "Random walk 1D.py", "file_ext": "py", "file_size_in_byte": 535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.random.random", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "131710416", "text": "import numpy as np \nimport pandas as pd \n# import seaborn as sb \nimport os, sys\nimport matplotlib.pyplot as plt \nimport skimage\nfrom skimage.transform import resize\nfrom PIL import Image\nfrom cnn_model_small import cnn_model_small\n\nPATH_TO_TRAIN='data/train/'\nNUM_CHANNELS = 4\nNUM_LABELS = 28\n\n# def fill_targets(row):\n# row.Target = np.array(row.Target.split(\" \")).astype(np.int)\n# for num in row.Target:\n# name = label_names[int(num)]\n# row.loc[name] = 1\n# return row\n# def fill_targets(row):\n# row.Target = np.array(row.Target.split(\" \")).astype(np.int)\n# for num in row.Target:\n# row.loc[label_names[int(num)]] = 1\n# return row\n\nclass DataHandler:\n def __init__(self, \n metadata,\n image_dims,\n batch_size):\n \n self._metadata = metadata\n self._image_dims = image_dims\n self._batch_size = batch_size\n\n\n @property\n def size(self):\n return self._metadata.shape[0]\n\n @property\n def metadata(self):\n return self._metadata\n\n def load_image(self,path_to_image):\n red_ch 
= Image.open(path_to_image+'_red.png')\n green_ch = Image.open(path_to_image+'_green.png')\n blue_ch = Image.open(path_to_image+'_blue.png')\n yellow_ch = Image.open(path_to_image+'_yellow.png')\n \n image = np.stack((\n red_ch, \n green_ch, \n blue_ch, \n yellow_ch), axis=-1) \n\n image = resize(image, (self._image_dims[0], self._image_dims[1]), mode='reflect')\n\n return image\n\n def load_image_greenonly(self,path_to_image):\n red_ch = Image.open(path_to_image+'_red.png')\n green_ch = Image.open(path_to_image+'_green.png')\n blue_ch = Image.open(path_to_image+'_blue.png')\n yellow_ch = Image.open(path_to_image+'_yellow.png')\n \n # modify later (PIL images do not support +=, so convert to arrays first)\n red_ch = np.array(red_ch) + (np.array(yellow_ch)//2).astype(np.uint8) \n blue_ch = np.array(blue_ch) + (np.array(yellow_ch)//2).astype(np.uint8)\n\n image = np.array(green_ch)\n\n image = resize(image, (self._image_dims[0], self._image_dims[1]), mode='reflect')\n\n return image\n\n def supply_batch(self, greenonly):\n while True:\n indices = np.random.choice(len(self._metadata), self._batch_size)\n batch_images = np.empty((self._batch_size, self._image_dims[0], self._image_dims[1], self._image_dims[2]))\n batch_labels = np.zeros((self._batch_size, NUM_LABELS))\n\n for i, index in enumerate(indices):\n if greenonly: \n temp = self.load_image_greenonly(self._metadata[index]['path'])\n else:\n temp = self.load_image(self._metadata[index]['path'])\n\n batch_images[i] = temp\n batch_labels[i][self._metadata[index]['labels']] = 1\n yield batch_images, batch_labels\n\n\nif __name__ == \"__main__\":\n\n\n train_data = pd.read_csv('data/train.csv')\n train_metadata = []\n for filename, labels in zip(train_data['Id'], train_data['Target'].str.split(' ')):\n train_metadata.append({\n 'path': os.path.join(PATH_TO_TRAIN, filename),\n 'labels': np.array([int(label) for label in labels])\n })\n\n train_metadata = np.array(train_metadata)\n train_dh = DataHandler(train_metadata, [512,512,4], 4)\n image_0 = train_dh.load_image(train_metadata[0]['path'])\n print(image_0[:,100,0])\n plt.figure()\n plt.imshow(image_0)\n plt.show()\n\n\n # # Fetch a batch\n # train_batch = train_dh.supply_batch(greenonly=False)\n # images, labels = next(train_batch)\n\n # # Display the batch\n # fig, ax = plt.subplots(1,4,figsize=(20,5))\n\n # for i in range(4):\n # ax[i].imshow(images[i])\n # # ax[i].set_title(labels[i])\n # # ax[i].set_facecolor('black')\n # plt.show()\n\n # model = cnn_model(images)\n\n\n # print('min: {0}, max: {1}'.format(images.min(), images.max()))\n################################################################################################################\n\n\n # train_labels = pd.read_csv(\"data/train.csv\")\n # # print(train_labels.head())\n # # print(train_labels.shape)\n\n # label_names = {\n # 0: \"Nucleoplasm\", \n # 1: \"Nuclear membrane\", \n # 2: \"Nucleoli\", \n # 3: \"Nucleoli fibrillar center\", \n # 4: \"Nuclear speckles\",\n # 5: \"Nuclear bodies\", \n # 6: \"Endoplasmic reticulum\", \n # 7: \"Golgi apparatus\", \n # 8: \"Peroxisomes\", \n # 9: \"Endosomes\", \n # 10: \"Lysosomes\", \n # 11: \"Intermediate filaments\", \n # 12: \"Actin filaments\", \n # 13: \"Focal adhesion sites\", \n # 14: \"Microtubules\", \n # 15: \"Microtubule ends\", \n # 16: \"Cytokinetic bridge\", \n # 17: \"Mitotic spindle\", \n # 18: \"Microtubule organizing center\", \n # 19: \"Centrosome\", \n # 20: \"Lipid droplets\", \n # 21: \"Plasma membrane\", \n # 22: \"Cell junctions\", \n # 23: \"Mitochondria\", \n # 24: \"Aggresome\", \n # 25: \"Cytosol\", \n # 26: \"Cytoplasmic bodies\", \n # 27: \"Rods 
& rings\"\n # }\n\n # for key, val in label_names.items():\n # train_labels[val] = 0\n\n # train_labels = train_labels.apply(fill_targets, axis = 1)\n # print(train_labels.head())\n\n # # plot frequency\n\n", "sub_path": "gcloud/data_handler.py", "file_name": "data_handler.py", "file_ext": "py", "file_size_in_byte": 5464, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PIL.Image.open", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 52, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 64, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "496104007", "text": "import os\nfrom flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\n\napp = Flask(__name__) #was dis?\n\n# set base directory\nbasedir = 
os.path.abspath(os.path.dirname(__file__)) #?\n\n# SQLite Database\nDATABASE = 'sqlite:///' + os.path.join(\n basedir, 'db.wutang')\n\n# DATABASE = 'postgresql://localhost/wutang'\n\napp.config['SQLALCHEMY_DATABASE_URI'] = DATABASE\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False #?\n\n# init database\ndb = SQLAlchemy(app) #?\n\n# init Marshmallow\nmarshmallow = Marshmallow(app)\n\nDEBUG = True\nPORT = 8000\n\n@app.route('/') #?\ndef hello_world():\n return 'Hello World'\n@app.route('/post', methods=['POST'])\n@app.route('/post/<postid>', methods=['GET'])\ndef create_post(postid=None):\n from models import Post\n if postid is None:\n name = request.json['name']\n profile_name = request.json['profile_name']\n email = request.json['email']\n return Post.create_post(name, profile_name, email)\n else:\n return Post.get_post(postid)\n\nif __name__ == '__main__':\n app.run(debug=DEBUG, port=PORT)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1116, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_marshmallow.Marshmallow", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Post.create_post", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Post.get_post", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "25521544", "text": "from logging import getLogger\nfrom turtle_zero.config import Config\nfrom turtle_zero.agent.api_turtle import TurtleModelAPI\n\nlogger = getLogger(__name__)\n\nclass TurtleModelPredictions:\n def __init__(self, config: Config, agent_model, dataset):\n self.config = config\n self.agent_model = agent_model\n self.dataset = dataset\n\n def save(self, predictions_path):\n logger.debug(f\"save predictions to {predictions_path}\")\n with open(predictions_path, \"wt\") as f:\n #json.dump(self.model.get_config(), f)\n #self.model.save_predictions(predictions_path, self.dataset)\n self.save_predictions(predictions_path, self.agent_model, self.dataset)\n #self.digest = self.fetch_digest(predictions_path)\n #logger.debug(f\"saved model digest {self.digest}\")\n\n def save_predictions(self, predictions_path, agent_model, dataset):\n #Calculate profit\n self.api = TurtleModelAPI(self.config, self.agent_model)\n #score = 1\n state_ary, exit_ary, z_ary = self.dataset\n logger.debug(f\"state_ary ndim = {state_ary.ndim} size = {state_ary.size} shape = 
{state_ary.shape} len = {len(state_ary)}\")\n arylength = len(state_ary)\n #create file\n predict_file = self.create_file(predictions_path)\n for aryindex in range(0, arylength):\n #for s_ary, exit_ary in self.dataset:\n exit_predicts, value_predicts = self.api.predict(state_ary[aryindex])\n #write predicts to file\n self.writepredicts(predict_file, aryindex, exit_predicts[0], value_predicts[0])\n #logger.debug(f\"exit_ary ndim = {exit_ary.ndim} size = {exit_ary.size} shape = {exit_ary.shape} len = {len(exit_ary)}\")\n #logger.debug(f\"exit_predicts ndim = {exit_predicts.ndim} size = {exit_predicts.size} shape = {exit_predicts.shape} len = {len(exit_predicts)}\")\n #score *= self.exit_score(exit_ary[aryindex], exit_predicts[0])\n #logger.debug(f\"score = {score}\")\n #close file\n self.close_file(predict_file)\n \n def create_file(self, prediction_path):\n self.predictfile = open(prediction_path, \"w+\")\n\n def writepredicts(self, predict_file, aryindex, exit_predicts, value_predicts):\n self.predictfile.write(\"%d %f\\n\" % (aryindex, value_predicts))\n exit_predicts_list = exit_predicts.tolist()\n for predictitem in exit_predicts_list:\n self.predictfile.write(\"%f \" % predictitem)\n self.predictfile.write(\"\\n\")\n\n def close_file(self, predict_file):\n self.predictfile.close()\n \n \"\"\"\n def exit_score(self, exit_ary, exit_predicts):\n exit_ary_list = exit_ary.tolist()\n exit_predicts_list = exit_predicts.tolist()\n maxvalue = max(exit_predicts_list)\n minvalue = min(exit_predicts_list)\n buyprofit = maxvalue - 0.5\n sellprofit = 0.5 - minvalue\n buyindex = exit_predicts_list.index(maxvalue)\n sellindex = exit_predicts_list.index(minvalue)\n if buyprofit >= sellprofit:\n if exit_ary_list[buyindex] >= 0.5:\n return (1 / ((1 - (exit_ary_list[buyindex] - 0.5) * 2) ** 0.166667)) ** (1 / (buyindex+1)) #((1 - Math.Pow(kdata.klineList[i].Close / kdata.klineList[j].Close, 6)) / 2 + 0.5)\n else:\n return (((exit_ary_list[buyindex] - 0.5) * 2 + 1) ** 0.166667) ** (1 / (buyindex+1)) \n else:\n if exit_ary_list[sellindex] <= 0.5:\n return (1 / (((exit_ary_list[buyindex] - 0.5) * 2 + 1) ** 0.166667)) ** (1 / (buyindex+1)) #((Math.Pow(kdata.klineList[j].Close / kdata.klineList[i].Close, 6) - 1) / 2 + 0.5)\n else:\n return ((1 - (exit_ary_list[buyindex] - 0.5) * 2) ** 0.166667) ** (1 / (buyindex+1)) \n \"\"\"\n", "sub_path": "src/turtle_zero/agent/predictions_turtle.py", "file_name": "predictions_turtle.py", "file_ext": "py", "file_size_in_byte": 3807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "turtle_zero.config.Config", "line_number": 8, "usage_type": "name"}, {"api_name": "turtle_zero.agent.api_turtle.TurtleModelAPI", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "47739597", "text": "from .provider_test import ProviderTest\nfrom gunpowder import *\nfrom itertools import product\nfrom unittest import skipIf\nimport itertools\nimport numpy as np\nimport logging\n\n\nclass ExampleSource(BatchProvider):\n\n def setup(self):\n\n self.provides(\n ArrayKeys.GT_LABELS, ArraySpec(\n roi=Roi((-40, -40, -40), (160, 160, 160)),\n voxel_size=(20, 4, 8),\n interpolatable=False))\n self.provides(\n ArrayKeys.GT_MASK, ArraySpec(\n roi=Roi((-40, -40, -40), (160, 160, 160)),\n voxel_size=(20, 4, 8),\n interpolatable=False))\n\n def provide(self, request):\n\n batch = Batch()\n\n roi = request[ArrayKeys.GT_LABELS].roi\n shape = 
(roi/self.spec[ArrayKeys.GT_LABELS].voxel_size).get_shape()\n spec = self.spec[ArrayKeys.GT_LABELS].copy()\n spec.roi = roi\n\n batch.arrays[ArrayKeys.GT_LABELS] = Array(\n np.random.randint(\n 0, 2,\n shape\n ),\n spec\n )\n\n roi = request[ArrayKeys.GT_MASK].roi\n shape = (roi/self.spec[ArrayKeys.GT_MASK].voxel_size).get_shape()\n spec = self.spec[ArrayKeys.GT_MASK].copy()\n spec.roi = roi\n\n batch.arrays[ArrayKeys.GT_MASK] = Array(\n np.random.randint(\n 0, 2,\n shape\n ),\n spec\n )\n\n return batch\n\n\nclass TestAddAffinities(ProviderTest):\n\n def test_output(self):\n\n neighborhood = [\n Coordinate((-2,0,0)),\n Coordinate((0,-1,0)),\n Coordinate((0,0,1)),\n Coordinate((1,1,1))\n ]\n\n pipeline = (\n ExampleSource() +\n AddAffinities(\n neighborhood,\n labels=ArrayKeys.GT_LABELS,\n labels_mask=ArrayKeys.GT_MASK,\n affinities=ArrayKeys.GT_AFFINITIES,\n affinities_mask=ArrayKeys.GT_AFFINITIES_MASK)\n )\n\n with build(pipeline):\n\n for i in range(10):\n\n request = BatchRequest()\n request.add(ArrayKeys.GT_LABELS, (100,16,64))\n request.add(ArrayKeys.GT_MASK, (100,16,64))\n request.add(ArrayKeys.GT_AFFINITIES, (100,16,64))\n request.add(ArrayKeys.GT_AFFINITIES_MASK, (100,16,64))\n\n batch = pipeline.request_batch(request)\n\n self.assertTrue(ArrayKeys.GT_LABELS in batch.arrays)\n self.assertTrue(ArrayKeys.GT_MASK in batch.arrays)\n self.assertTrue(ArrayKeys.GT_AFFINITIES in batch.arrays)\n self.assertTrue(ArrayKeys.GT_AFFINITIES_MASK in batch.arrays)\n\n labels = batch.arrays[ArrayKeys.GT_LABELS]\n labels_mask = batch.arrays[ArrayKeys.GT_MASK]\n affs = batch.arrays[ArrayKeys.GT_AFFINITIES]\n affs_mask = batch.arrays[ArrayKeys.GT_AFFINITIES_MASK]\n\n self.assertTrue((len(neighborhood),) + labels.data.shape == affs.data.shape)\n\n voxel_roi = Roi((0,0,0), labels.data.shape)\n for (z,y,x) in product(*[range(d) for d in labels.data.shape]):\n\n p = Coordinate((z,y,x))\n\n for n in range(len(neighborhood)):\n\n pn = p + neighborhood[n]\n if not voxel_roi.contains(pn):\n continue\n\n a = labels.data[p]\n b = labels.data[pn]\n masked = (\n labels_mask.data[p] == 0 or\n labels_mask.data[pn] == 0)\n\n if a == b and a != 0 and b != 0:\n self.assertEqual(affs.data[(n,)+p], 1.0, \"%s -> %s, %s -> %s, but is not 1\"%(p, pn, a, b))\n else:\n self.assertEqual(affs.data[(n,)+p], 0.0, \"%s -> %s, %s -> %s, but is not 0\"%(p, pn, a, b))\n if masked:\n self.assertEqual(affs_mask.data[(n,)+p], 0.0, (\n \"%s or %s are masked, but mask is not 0\"%\n (p, pn)))\n", "sub_path": "tests/cases/add_affinities.py", "file_name": "add_affinities.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "provider_test.ProviderTest", "line_number": 58, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "284210844", "text": "from django.contrib.contenttypes.models import ContentType\n\ntry:\n from django.apps import apps\n get_model = apps.get_model\nexcept ImportError:\n from django.db.models.loading import get_model\n\nimport datetime\ntry:\n from django.utils import timezone\n now = timezone.now\nexcept ImportError:\n now = 
datetime.datetime.now\n\n\ndef rep_handler(**kwargs):\n \"\"\"\n Handler function to create Increase Rep obj and create RepAction instance upon\n rep signal call.\n \"\"\"\n\n # get variables from rep.send\n user = kwargs.pop('sender')\n\n try:\n target = kwargs.pop('target')\n except KeyError:\n target = None\n\n try:\n val = kwargs.pop('val')\n except KeyError:\n val = 0\n\n new_action = get_model('djrep.RepAction')(\n target_content_type_id=ContentType.objects.get_for_model(target).id,\n target_object_id=target.pk,\n user_id=user.pk,\n timestamp=kwargs.pop('timestamp', now()),\n val=val,\n old_rep_val=user.rep.val,\n )\n\n new_action.save(force_insert=True)\n\n if val:\n user.rep.val += val\n user.rep.save()\n\n return new_action\n", "sub_path": "djrep/receivers.py", "file_name": "receivers.py", "file_ext": "py", "file_size_in_byte": 1142, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.apps.apps.get_model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.apps.apps", "line_number": 5, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models.loading.get_model", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "216846214", "text": "print('Process queue - Queue')\n\n# process module\n#import multiprocessing\\\nfrom multiprocessing import Process,Queue\n\nimport time\nimport os\n\ndef run(q):\n #time.sleep(2)\n q.put(['www',123])\n print('subid=',id(q))\n\n# on Windows the following guard is required\nif __name__ == '__main__':\n # create a queue\n q = Queue()\n print('mainid=',id(q))\n p_list = []\n for p in range(3):\n p = Process(target=run,args=(q,))\n p_list.append(p)\n p.start()\n # for p in p_list:\n # p.join() # if join is used here, the main process waits for all child processes to finish before continuing\n print('end')\n print(q.get())# get() blocks here until a value is available\n print(q.get())\n print(q.get())\n # let the main process have a turn too\n run(q)\n", "sub_path": "process/process03.py", "file_name": "process03.py", "file_ext": "py", "file_size_in_byte": 769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "multiprocessing.Queue", "line_number": 18, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "216279953", "text": "import sqlite3\n\nconn = sqlite3.connect('myDb.db')\nfileList = ('information.docx', 'Hello.txt', 'myImage.png', 'myMovie.mpg', 'World.txt', 'data.pdf', 'myPhoto.jpg')\n\nwith conn:\n cur = conn.cursor()\n cur.execute('CREATE TABLE IF NOT EXISTS tbl_files( \\\n file_id INTEGER PRIMARY KEY AUTOINCREMENT, \\\n file_txt_type TEXT \\\n )')\n conn.commit()\n \n for x in fileList:\n if x.endswith('.txt'):\n cur.execute('INSERT INTO tbl_files(file_txt_type) VALUES (?)', (x,))\n conn.commit()\n \n cur.execute('SELECT file_txt_type FROM tbl_files')\n txtFiles = cur.fetchall()\n print('All .txt files currently in the directory: \\n')\n i = 1\n for file in txtFiles:\n msg = 'File {}: {}'.format(i, 
file[0])\n print(msg)\n i += 1\nconn.close()\n", "sub_path": "assignments/assignment-page-162.py", "file_name": "assignment-page-162.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "101992855", "text": "\"\"\"\nFile name: prediction_score_for_multiprocess.py\nAuthor: yoshi, shoichi\nDescription: Script for converting prediction score to table\nDate: 15 July 2019\n\"\"\"\n\n\nimport argparse\nfrom functools import partial\nfrom multiprocessing import Pool, Manager\nimport pickle\nimport pprint\nimport sys\nimport time\n\nimport joblib\nimport pandas as pd\nfrom scipy import stats\n\n\nclass dotdict(dict):\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n \n def __getstate__(self):\n return self.__dict__\n \n def __setstate__(self, dict):\n self.__dict__ = dict\n\n\ndef build_node_name(filename):\n \"\"\" To convert node ID to gene/chemical name \"\"\"\n print(f'\\n== Prep node names list ==\\n'\n f'load: {filename}') # node data='dataset_node.csv'\n with open(filename, 'r') as f:\n node_names = [l.strip() for l in f]\n print(f'#node_names: {len(node_names)}')\n return node_names\n\n\ndef build_test_label_pairs(filename, cv):\n \"\"\" To make test label pair list \"\"\"\n # import main result data (post caluculation jbl file)\n print(f'\\n== Prep test label pairs list ==\\n'\n f'load: {filename}\\n'\n f'cv fold: {cv}')\n result_data = joblib.load(filename)\n test_labels = result_data[cv]['test_labels']\n test_label_pairs = []\n\n for i in test_labels[0]:\n test_label_pair = (i[0], i[2])\n test_label_pair = tuple(sorted(test_label_pair))\n test_label_pairs.append(test_label_pair)\n\n print(f'#test_label_pairs: {len(test_label_pairs)}\\n'\n f'Remove duplicate.')\n test_label_pairs = list(set(test_label_pairs)) # remove duplicated in list of test_label_pairs\n print(f'#duplicate-removed test_label_pairs: {len(test_label_pairs)}\\n'\n f'Completed to prep test label list.')\n return test_label_pairs\n\n\ndef build_target_label_pairs(filename): # args.dataset (input data jbl file)\n \"\"\"To make all prediction target (train+test) label pair list\"\"\"\n # import all edge label data (input data for establish model, train + test) \n print(f'\\n== Prep all target label pairs list ==\\n'\n f'load: {filename}')\n input_data = joblib.load(filename)\n label_list = input_data['label_list']\n target_label_pairs = []\n\n for i in label_list[0]:\n label_pair = (i[0], i[2])\n label_pair = tuple(sorted(label_pair))\n target_label_pairs.append(label_pair)\n\n print(f'#target_label_pairs: {len(target_label_pairs)}\\n'\n f'Remove duplicate.')\n target_label_pairs = list(set(target_label_pairs)) # remove duplicated in list of target_label_pairs\n print(f'#duplicate-removed target_label_pairs: {len(target_label_pairs)}\\n'\n f'Completed to prep target label list.')\n return target_label_pairs\n\n\ndef sort_prediction_score(filename, cv, target_label_pairs, test_label_pairs, score_rank, cutoff, train, edge_type):\n \"\"\" Sort prediction result array matrix and Set threshold \"\"\"\n print('\\n== Sort predisction score ==')\n print(f'load: {filename}')\n with open(filename, 'rb') as f: # only activate when test sample data\n result_data = pickle.load(f) # only activate when test sample data\n # result_data = joblib.load(filename)\n print(f'cv fold: {cv}')\n # prediction = 
result_data[cv]['prediction_data']\n # matrix = prediction[0]\n matrix = result_data # only activate when test sample data\n print(f'prediction score matrix shape: {matrix.shape}\\n'\n f'\\nPrep list of [(score,row,col)] from prediction score results matrix.')\n dim_row = matrix.shape[0]\n dim_col = matrix.shape[1]\n score_row_col = [(matrix[row, col], row, col) for row in range(dim_row) for col in range(row+1, dim_col)]\n print(f'#scores as adopted: {len(score_row_col)}') # should be 480577503\n\n if edge_type == 'ppi':\n \"\"\" protein-protein \"\"\"\n print(f'Pick protein-protein interaction.')\n ppi1 = [i for i in score_row_col if i[1] < 3071 or i[1] > 14506]\n ppi = [i for i in ppi1 if i[2] < 3071 or i[2] > 14506]\n print(f'#total protein-protein edge: {len(ppi)}\\n') # should be 191423961\n edgetype_selection_score = ppi\n\n elif edge_type == 'pci':\n \"\"\" protein-chemical \"\"\"\n print(f'Pick protein-chemical interaction.')\n pci1 = [i for i in score_row_col if i[1] < 3071 and 3070 < i[2] < 14507]\n pci2 = [i for i in score_row_col if 3070 < i[1] < 14507 and 14506 < i[2] < 31003]\n pci = pci1 + pci2\n print(f'#total protein-chemical edge: {len(pci)}\\n') # should be 223768212\n edgetype_selection_score = pci\n\n elif edge_type == 'cci':\n \"\"\" chemical-chemical \"\"\" \n print(f'Pick chemical-chemical interaction.')\n cci = [i for i in score_row_col if 3070 < i[1] < 14507 and 3070 < i[2] < 14507]\n print(f'#total chemical-chemical edge: {len(cci)}\\n') # should be 65385330\n edgetype_selection_score = cci\n\n else:\n print(\"[ERROR] Choose edge_type from 'ppi', 'pci', 'cci'\")\n sys.exit(1)\n\n # sort scores with descending order\n print('Sort scores and pre-pick toplist by cutoff value.')\n edgetype_selection_score.sort(reverse=True) # Sort list based on \"score\" with a decending order\n score_sort = edgetype_selection_score[:cutoff] # args.cutoff: Pick top list using arbitrary threshold\n print(f'#pre-picked top score list: {len(score_sort)}')\n\n if train:\n print(f'(Train labels are included for preparing score-ordred list.)\\n'\n f'Pick toplist by score_rank.')\n score_sort_toplist = score_sort[:score_rank] # args.score_rank: Select top score ranking to export\n print(f'#score post pick score-rank: {len(score_sort_toplist)}\\n'\n f'Completed to prep prediction score-ordered list including train labels.')\n return score_sort_toplist\n else:\n print(f'(Train labels are excluded for preparing score-ordred list.)\\n'\n f'Pick toplist by score_rank.')\n train_label_pairs = list(set(target_label_pairs) - set(test_label_pairs)) # Prep target,test,train label list\n score_tmp = [i for i in score_sort if (i[1], i[2]) not in set(train_label_pairs)]\n score_tmp.sort(reverse=True)\n score_sort_toplist = score_tmp[:score_rank]\n print(f'#score post pick score-rank: {len(score_sort_toplist)}\\n'\n f'Completed to prep prediction score-ordered list w/o train labels.')\n return score_sort_toplist\n\n\ndef convert(score_sort_toplist, target_label_pairs, test_label_pairs, node_names, train, total_list):\n \"\"\"\n let score-sorted list [(score,row,col)...] 
convert to table\n total_list = (scores, rows, cols, gene1, gene2, train_edge, test_edge, new_edge)\n \"\"\"\n tmp_list = []\n if train:\n for i in score_sort_toplist:\n scores = i[0]\n row = i[1]\n gene1 = node_names[row]\n col = i[2]\n gene2 = node_names[col]\n prediction_label_pair = (row, col)\n if prediction_label_pair in target_label_pairs:\n if prediction_label_pair in test_label_pairs:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 1, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 1, 0, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 0, 1])\n else:\n for i in score_sort_toplist:\n scores = i[0]\n row = i[1]\n gene1 = node_names[row]\n col = i[2]\n gene2 = node_names[col]\n prediction_label_pair = (row, col)\n if prediction_label_pair in test_label_pairs:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 1, 0])\n else:\n tmp_list.append([scores, row, col, gene1, gene2, 0, 0, 1])\n total_list.extend(tmp_list)\n\n\ndef process_table(rows, cols, gene1, gene2, scores, train_edge, test_edge, new_edge):\n \"\"\" To build a table \"\"\"\n print('\\n== Process curated prediction score to build a table ==')\n table = pd.DataFrame({\n \"row\": rows,\n \"col\": cols,\n \"gene1\": gene1,\n \"gene2\": gene2,\n \"score\": scores,\n \"train_edge\": train_edge,\n \"test_edge\": test_edge,\n \"new_edge\": new_edge\n })\n # print('#table shape: ', table.shape)\n table = table.assign(score_ranking=len(table.score) - stats.rankdata(table.score, method='max') + 1)\n print('Sort the table with score-descending order.')\n table_sort_score = table.sort_values(by='score', ascending=False)\n table_sort_score = table_sort_score[['row', 'col', 'gene1', 'gene2', 'score', 'score_ranking', 'train_edge',\n 'test_edge', 'new_edge']]\n print(f'#final table shape: {table.shape}\\n'\n f'Completed processing to build a table.')\n return table_sort_score\n\n\ndef enrichment(target_label_pairs, test_label_pairs, table_sort_score, cv, train, edge_type):\n print('\\n== Calculate enrichment ==')\n train_label_pairs = list(set(target_label_pairs) - set(test_label_pairs)) # prep train edges list\n\n if train:\n if edge_type == 'ppi':\n total = 191423961\n elif edge_type == 'pci':\n total = 223768212\n elif edge_type == 'cci':\n total = 65385330\n else:\n print(\"[ERROR] Choose edge_type from 'ppi', 'pci', 'cci'\")\n sys.exit(1)\n\n total_wo_train = total - len(train_label_pairs) # remove train edges from total\n total_test_edges = len(test_label_pairs)\n table_wo_train = table_sort_score[table_sort_score.train_edge == 0] # prep table w/o train edges (remove train from the table)\n print(f'Summary of edges attribution\\n'\n f'cv fold: {cv}\\n'\n f'#total as scored: {total}\\n'\n f'#total_w/o_train_edges: {total_wo_train}\\n'\n f'#total_target_edges: {len(target_label_pairs)}\\n'\n f'#total_train_edges: {len(train_label_pairs)}\\n'\n f'#total_test_edges: {len(test_label_pairs)}\\n')\n\n # enrichment calcucation\n top = [0.1, 0.5, 1.0] # top: 0.1%, 0.5%, 1%, 3%, 5%\n for i in top:\n ratio = i*0.01\n top_ratio = round(total_wo_train*ratio) # calculate the number of top list based on top%\n table_wo_train_toplist = table_wo_train.iloc[:top_ratio, ] # pick top list from the table w/o train edges\n test_edges_in_toplist = len(table_wo_train_toplist[table_wo_train_toplist.test_edge == 1].index)\n test_edges_enrichment = test_edges_in_toplist/total_test_edges\n print(f'#top%: {i}\\n'\n f'#top_ratio: {top_ratio}\\n'\n f'#test_edges_in_toplist: {test_edges_in_toplist}\\n'\n f'#test edges 
enrichment top{i}%: {test_edges_enrichment}\\n')\n\n else:\n pass # build later...\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--result', type=str, help=\"input result: gcn_cv.jbl\")\n parser.add_argument('--dataset', type=str, help=\"input dataset: dataset.jbl\")\n parser.add_argument('--node', type=str, help=\"input dataset node: dataset_node.csv\")\n parser.add_argument('--cv', default=0, type=int, help=\"cross validation: select 0,1,2,3,4\")\n parser.add_argument('--output', type=str, help=\"output:score.txt\")\n parser.add_argument('--output_pkl', type=str, help=\"output:score.pkl\")\n parser.add_argument('--score_rank', default=10000, type=int, help='pick score ranking from 1 to score_rank')\n parser.add_argument('--cutoff', default=10000, type=int, help='pre-pick score ranking from 1 to cutoff, should cutoff > score_rank')\n parser.add_argument(\"-t\", '--train', action=\"store_true\", help=\"default: exclude train label at score ranking list\")\n parser.add_argument(\"-n\", \"--proc_num\", type=int, default=1, help=\"a number of processors for multiprocessing.\")\n parser.add_argument('--edge_type', type=str, help=\"edge_type: ppi(protein-protein), pci(protein-chemical), cci(chemical-chemical)\")\n args = parser.parse_args()\n print('\\n== args summary ==')\n pprint.pprint(vars(args))\n return args\n\n\ndef split_list(l, n):\n return [l[i::n] for i in range(n)]\n\n\ndef main():\n args = get_parser()\n start_time = time.time()\n\n node_names = build_node_name(args.node)\n # test_label_pairs = build_test_label_pairs(args.result, args.cv) # main code\n with open(\"./test_label_pairs.pkl\", \"rb\") as f: # only activate when test sample data\n test_label_pairs = pickle.load(f) # only activate when test sample data\n target_label_pairs = build_target_label_pairs(args.dataset)\n score_sort_toplist = sort_prediction_score(args.result, args.cv, target_label_pairs, test_label_pairs,\n args.score_rank, args.cutoff, args.train, args.edge_type)\n\n print('\\n== Start convesion of prediction scores ==')\n print(f'Train labels are {[\"included\" if args.train else \"excluded\"][0]}.')\n n_proc = args.proc_num\n pool = Pool(processes=n_proc)\n split_score_sort_toplist = split_list(score_sort_toplist, n_proc)\n with Manager() as manager:\n total_list = manager.list()\n convert_ = partial(convert, target_label_pairs=set(target_label_pairs), test_label_pairs=set(test_label_pairs),\n node_names=node_names, train=args.train, total_list=total_list)\n pool.map(convert_, split_score_sort_toplist)\n scores = [l[0] for l in total_list]\n rows = [l[1] for l in total_list]\n cols = [l[2] for l in total_list]\n gene1 = [l[3] for l in total_list]\n gene2 = [l[4] for l in total_list]\n train_edge = [l[5] for l in total_list]\n test_edge = [l[6] for l in total_list]\n new_edge = [l[7] for l in total_list]\n print(f'\\n#rows: {len(rows)}\\n'\n f'#cols: {len(cols)}\\n'\n f'#gene1: {len(gene1)}\\n'\n f'#gene2: {len(gene2)}\\n'\n f'#scores: {len(scores)}\\n'\n f'#train_edge: {len(train_edge)}\\n'\n f'#test_edge: {len(test_edge)}\\n'\n f'#new_edge: {len(new_edge)}')\n print('Completed conversion.')\n\n table_sort_score = process_table(rows, cols, gene1, gene2, scores, train_edge, test_edge, new_edge)\n print(f'\\n== Export the processed result as txt/pkl file ==\\n'\n f'output txt file path: {args.output}\\n'\n f'output pkl file path: {args.output_pkl}')\n with open(args.output, 'w') as f:\n table_sort_score.to_csv(f, sep='\\t', header=True, index=False)\n \n 
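# the pickled table can be reloaded later with pandas.read_pickle(args.output_pkl)\n 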
table_sort_score.to_pickle(args.output_pkl)\n #enrichment(target_label_pairs, test_label_pairs, table_sort_score, args.cv, args.train, args.edge_type)\n\n elapsed_time = time.time() - start_time\n print(f'\\n#time:{elapsed_time} sec\\n'\n f'-- fin --\\n')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "prediction_score_for_multiprocess.py", "file_name": "prediction_score_for_multiprocess.py", "file_ext": "py", "file_size_in_byte": 14976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "joblib.load", "line_number": 50, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 72, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "call"}, {"api_name": "scipy.stats.rankdata", "line_number": 210, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 210, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 233, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 264, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 278, "usage_type": "call"}, {"api_name": "time.time", "line_number": 288, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 293, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 301, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 303, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 305, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}]} +{"seq_id": "276605336", "text": "from django.utils import encoding\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.views import exception_handler as drf_exception_handler\n\nfrom rest_framework_json_api.utils import format_value\n\n\ndef exception_handler(exc, context):\n response = drf_exception_handler(exc, context)\n\n errors = []\n # handle generic errors. 
ValidationError('test') in a view for example\n if isinstance(response.data, list):\n for message in response.data:\n errors.append({\n 'detail': message,\n 'source': {\n 'pointer': '/data',\n },\n 'status': encoding.force_text(response.status_code),\n })\n # handle all errors thrown from serializers\n else:\n for field, error in response.data.items():\n field = format_value(field)\n pointer = '/data/attributes/{}'.format(field)\n # see if they passed a dictionary to ValidationError manually\n if isinstance(error, dict):\n errors.append(error)\n # or a string in case of AuthenticationError\n elif isinstance(error, str):\n # An error MUST be a JSON object in JSON API spec\n errors.append({\n 'detail': error\n })\n elif isinstance(error, list):\n for message in error:\n errors.append({\n 'detail': message,\n 'source': {\n 'pointer': pointer,\n },\n 'status': encoding.force_text(response.status_code),\n })\n else:\n errors.append({\n 'detail': error,\n 'source': {\n 'pointer': pointer,\n },\n 'status': encoding.force_text(response.status_code),\n })\n\n\n context['view'].resource_name = 'errors'\n response.data = errors\n return response\n\n\nclass Conflict(APIException):\n status_code = status.HTTP_409_CONFLICT\n default_detail = _('Conflict.')\n", "sub_path": "rest_framework_json_api/exceptions.py", "file_name": "exceptions.py", "file_ext": "py", "file_size_in_byte": 2265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.views.exception_handler", "line_number": 11, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_text", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.encoding", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework_json_api.utils.format_value", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_text", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.encoding", "line_number": 45, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_text", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.encoding", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.APIException", "line_number": 62, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_409_CONFLICT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 63, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "107637581", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport xlwt\nimport xlrd\nfrom xlutils.copy import copy\n\n\nclass JdCrawlerPipeline(object):\n\n\n def process_item(self, item, spider):\n fields = ['id', 'topped', 'guid', 'content', 'creationTime', 'isTop', 'referenceId', 'referenceImage',\n 'referenceName', 'referenceTime', 'referenceType', 'referenceTypeId', 'firstCategory', 'secondCategory',\n 'thirdCategory', 'replyCount', 'replyCount2', 'score', 'status', 'title', 'usefulVoteCount',\n 'uselessVoteCount', 'userImage', 'userImageUrl', 'userLevelId', 'userProvince', 'viewCount', 'orderId',\n 'isReplyGrade', 'nickname', 'userClient', 'images', 'showOrderComment', 'mergeOrderStatus',\n 'discussionId', 'productColor', 'productSize', 'imageCount', 'integral', 
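# column order for the Excel sheet; one key per field of the crawled JD comment item\n 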
'userImgFlag', 'anonymousFlag',\n 'userLevelName', 'plusAvailable', 'productSales', 'mobileVersion', 'aesPin', 'officialsStatus',\n 'excellent', 'recommend', 'userLevelColor', 'userClientShow', 'isMobile', 'days', 'afterDays']\n workbook = xlrd.open_workbook('comments.xls')\n rsheet = workbook.sheet_by_index(0)\n nrows = rsheet.nrows\n wb = copy(workbook)\n sheet = wb.get_sheet(0)\n for index,field in enumerate(fields):\n sheet.write(nrows,index,label = str(item[field]))\n wb.save('comments.xls')\n\n return item\n", "sub_path": "jd_crawler/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "xlrd.open_workbook", "line_number": 25, "usage_type": "call"}, {"api_name": "xlutils.copy.copy", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "468133706", "text": "from discord import Embed\nfrom ifunny import objects\nfrom time import strftime, gmtime\nimport timeago, datetime\n\ndef ifunnyuser(data):\n user = objects.User.by_nick(data, client=robot)\n if user == None:\n userid = objects.User(id=data, client=robot)\n user = userid\n return user\n\ndef props(cls):\n return [i for i in cls.__dict__.keys() if i[:1] != '_']\n\n\ndef createUserEmbed(user, lastSeen=None, isActive=None):\n userData = user._object_data\n embedVar = Embed(title=user.nick, description=user.about, color=0x00ff00)\n if (user.profile_image):\n embedVar.set_thumbnail(url=user.profile_image.url)\n else:\n embedVar.set_thumbnail(url=\"https://cdn.miyako.rocks/iFunny/nopfp.png\")\n if (user.cover_image):\n embedVar.set_image(url=user.cover_image.url)\n else:\n embedVar.set_image(url=\"https://cdn.miyako.rocks/iFunny/nopfp.png\")\n embedVar.add_field(name=\"User ID\", value=userData['id'], inline=True)\n embedVar.add_field(name=\"Post Count\", value='{:,}'.format(userData['num']['created']), inline=True)\n embedVar.add_field(\n name=\"Rating\", value= \"{level} ({days} Days)\".format(\n level=userData['meme_experience']['rank'],\n days='{:,}'.format(userData['meme_experience']['days'])), \n inline=True)\n embedVar.add_field(name=\"Feature Count\", value='{:,}'.format(userData['num']['featured']), inline=True)\n embedVar.add_field(name=\"Smile Count\", value='{:,}'.format(userData['num']['total_smiles']), inline=True)\n embedVar.add_field(name=\"Subscriber Count\", value='{:,}'.format(userData['num']['subscribers']), inline=True)\n embedVar.add_field(name=\"Subscription Count\", value='{:,}'.format(userData['num']['subscriptions']), inline=True)\n embedVar.add_field(name=\"Verified\", value=str(userData['is_verified']), inline=True)\n embedVar.add_field(name=\"Chat Privacy\", value=userData['messaging_privacy_status'].capitalize(), inline=True)\n embedVar.add_field(name=\"Visit Profile\", value=\"[Click Here](%s)\" % userData['web_url'])\n if lastSeen:\n date = datetime.datetime.now()\n embedVar.add_field(\n name=\"Last Seen\",\n value=\"{time} ({ago})\".format(\n time=strftime(\"%b %d %Y %H:%M:%S\", gmtime(lastSeen/1000)),\n ago=timeago.format(lastSeen/1000, date)\n )\n )\n if isActive:\n embedVar.add_field(name=\"Is Active\", value=str(isActive), inline=True)\n embedVar.set_footer(text=\"Bot made by Request#0002\")\n return embedVar\n\ndef createIPEmbed(user, description=\"\"):\n embedVar = Embed(title=user.nick, description=user.about, color=0x00ff00)\n if (user.profile_image):\n embedVar.set_thumbnail(url=user.profile_image.url)\n else:\n 
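# fall back to a hosted placeholder image when the user has no avatar set\n 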
embedVar.set_thumbnail(url=\"https://cdn.miyako.rocks/iFunny/nopfp.png\")\n embedVar.add_field(name=\"User ID\", value=user.id, inline=True)\n embedVar.add_field(name=\"IP\", value=\"Once the target views the image their ip will show [here]({iplink}).\".format(iplink=\"http://ip.miyako.rocks:8080/ip/{userid}.txt\".format(userid=user.id)), inline=True)\n embedVar.set_footer(text=\"Bot made by Request#0002\")\n return embedVar\n\ndef createIPLookupEmbed(ip, data):\n embedVar = Embed(title=ip, description=\"Infromation about {}\".format(ip), color=0x00ff00)\n for result in data:\n embedVar.add_field(name=result, value=data[result], inline=True)\n embedVar.set_footer(text=\"Bot made by Request#0002\")\n return embedVar\n\n", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 3490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "ifunny.objects.User.by_nick", "line_number": 7, "usage_type": "call"}, {"api_name": "ifunny.objects.User", "line_number": 7, "usage_type": "attribute"}, {"api_name": "ifunny.objects", "line_number": 7, "usage_type": "name"}, {"api_name": "ifunny.objects.User", "line_number": 9, "usage_type": "call"}, {"api_name": "ifunny.objects", "line_number": 9, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 47, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 47, "usage_type": "call"}, {"api_name": "timeago.format", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "209646424", "text": "import numpy as np\nfrom scipy.signal import savgol_filter\nfrom math import ceil\n\ndef RemoveBaseline(spectrum, sensitivity = 5):\n\t\"\"\"\n\tGiven a series of values and a somewhat arbitrary sensitivity value, approximates\n\ta baseline by iterative savitsky-golay smoothing with increasing window size. 
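Each pass replaces the curve\n\twith the pointwise minimum of itself and a Savitzky-Golay smoothed copy (peak\n\tstripping), and the iteration stops once the stripped area reaches a local minimum. 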
The baseline\n\tis not allowed to rise above the values at any point.\n\n\tReturns the spectrum with baseline removed, as well as the baseline itself.\n\t\"\"\"\n\tdef _PeakStripping(spectrum, window):\n\t\tspectrum_smoothed = savgol_filter(spectrum, window, 0)\n\t\tbaseline = []\n\n\t\tfor s, ss in zip(spectrum, spectrum_smoothed):\n\t\t\tbaseline.append(min([s, ss]))\n\n\t\treturn np.array(baseline)\n\n\tif sensitivity < 3:\n\t\tsensitivity = 3\n\n\tlp = ceil(0.5 * len(spectrum))\n\t#pad spectrum at either end\n\tspectrum_0 = np.hstack([\n\t\t\tnp.full((lp,), spectrum[0]),\n\t\t\tspectrum,\n\t\t\tnp.full((lp,), spectrum[-1])\n\t\t])\n\tl2 = len(spectrum_0)\n\n\tn = 1\n\tnmax = len(spectrum)*0.9\n\tfoundMin = False\n\tS = spectrum_0\n\tA = []\n\tbaselines = []\n\twhile not foundMin:\n\t\tn = n + 2\n\t\ti = (n-1)/2\n\t\tbaseline = _PeakStripping(S, n)\n\t\tA.append(np.trapz(S - baseline))\n\t\tS = baseline\n\t\tbaselines.append(baseline)\n\n\t\tif i > sensitivity:\n\t\t\tif (A[-2] < A[-3]) and (A[-2] < A[-1]):\n\t\t\t\tfoundMin = True\n\n\t\tif n > nmax:\n\t\t\tfoundMin = True\n\n\tminIdx = np.argmin(A[sensitivity + 1:]) + sensitivity\n\tbaseline = baselines[minIdx][lp:-lp]\n\tspectrum_corrected = spectrum - baseline\n\n\treturn spectrum_corrected, baseline\n\n", "sub_path": "FrgTools/frgtools/curveprocessing.py", "file_name": "curveprocessing.py", "file_ext": "py", "file_size_in_byte": 1450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scipy.signal.savgol_filter", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.trapz", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "357246030", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Foo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('slug', models.SlugField()),\n ('description', models.TextField(max_length=500)),\n ],\n ),\n ]\n", "sub_path": "qwe/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": 
"django.db.models.SlugField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "380579370", "text": "\"\"\"44\n\nRevision ID: 2dd3e2586868\nRevises: bfb57a889a18\nCreate Date: 2018-05-24 15:33:31.264958\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '2dd3e2586868'\ndown_revision = 'bfb57a889a18'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('posts',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.Text(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('auther_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['auther_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)\n op.drop_table('post')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('post',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('body', mysql.TEXT(), nullable=True),\n sa.Column('timestamp', mysql.DATETIME(), nullable=True),\n sa.Column('auther_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['auther_id'], [u'users.id'], name=u'post_ibfk_1'),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'utf8',\n mysql_engine=u'InnoDB'\n )\n op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')\n op.drop_table('posts')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/2dd3e2586868_44.py", "file_name": "2dd3e2586868_44.py", "file_ext": "py", "file_size_in_byte": 1601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 36, "usage_type": 
"call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.INTEGER", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 37, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.TEXT", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.DATETIME", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.INTEGER", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 40, "usage_type": "name"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op.drop_index", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "22586636", "text": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport os\nimport numpy as np\nimport onnx\nfrom .tests_helper import dump_data_and_model # noqa\nfrom .tests_helper import ( # noqa\n dump_one_class_classification,\n dump_binary_classification,\n dump_multilabel_classification,\n dump_multiple_classification,\n)\nfrom .tests_helper import ( # noqa\n dump_multiple_regression,\n dump_single_regression,\n convert_model,\n fit_classification_model,\n fit_multilabel_classification_model,\n fit_regression_model,\n)\n\n\ndef create_tensor(N, C, H=None, W=None):\n if H is None and W is None:\n return np.random.rand(N, C).astype(np.float32, copy=False)\n elif H is not None and W is not None:\n return np.random.rand(N, C, H, W).astype(np.float32, copy=False)\n else:\n raise ValueError('This function only produce 2-D or 4-D tensor.')\n\n\ndef _get_ir_version(opv):\n if opv >= 12:\n return 7\n if opv >= 11:\n return 6\n if opv >= 10:\n return 5\n if opv >= 9:\n return 4\n if opv >= 8:\n return 4\n return 3\n\n\nTARGET_OPSET = int(os.environ.get('TEST_TARGET_OPSET',\n onnx.defs.onnx_opset_version()))\nTARGET_IR = int(os.environ.get('TEST_TARGET_IR',\n _get_ir_version(TARGET_OPSET)))\n", "sub_path": "tests/test_utils/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.random.rand", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "onnx.defs.onnx_opset_version", "line_number": 50, "usage_type": "call"}, {"api_name": "onnx.defs", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 51, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "292456643", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nlinear_regression.py - Linear-Regression solvers module\n=======================================================\n\nThis module contains all available Linear-Regression solvers.\nThese solvers can be received by using the only public method :func:`get_method`.\n\nExample:\n::\n get_method(LinearRegressionMethods.SVDBased) - Creating the standard Numpy solver for Linear-Regression.\n\n\"\"\"\n\nfrom numpy.linalg import lstsq, inv, qr\nimport numpy as np\nfrom scipy.linalg import solve_triangular, solve\nfrom scipy.sparse.linalg import lsqr\nfrom Infrastructure.enums import LinearRegressionMethods\nfrom Infrastructure.utils import ex, create_factory, Dict, Scalar, ColumnVector, Matrix, Callable\nfrom ComparedAlgorithms.method_boosters import cholesky_booster, caratheodory_booster, \\\n create_coreset_fast_caratheodory, fast_caratheodory_set_python\nfrom ComparedAlgorithms.sketch_preconditioner import generate_sketch_preconditioner\nfrom ComparedAlgorithms.base_least_square_solver import BaseSolver\n\n\nclass _SVDSolver(BaseSolver):\n 
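# least-squares via numpy.linalg.lstsq, which solves the problem through an SVD (hence the name)\n 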
@ex.capture\n def __init__(self, data_features: Matrix, output_samples: ColumnVector, cross_validation_folds: int,\n n_alphas: int = -1):\n r\"\"\"\n The standard solver of Numpy for Linear-Regression.\n\n Args:\n data_features(Matrix): The input data matrix :math:`n \\times d`.\n output_samples(ColumnVector): The output for the given inputs, :math:`n \\times 1`.\n n_alphas(int): The number of total regularization terms which will be tested by this solver.\n cross_validation_folds(int): The number of cross-validation folds used in this solver.\n\n \"\"\"\n super(_SVDSolver, self).__init__(data_features, output_samples, n_alphas, cross_validation_folds)\n self._model = None\n\n def fit(self) -> ColumnVector:\n \"\"\"\n The method which fits the requested model to the given data.\n \"\"\"\n self._fitted_coefficients = lstsq(self._data_features, self._output_samples, rcond=-1)[0]\n return self._fitted_coefficients\n\n\nclass _QRSolver(BaseSolver):\n @ex.capture\n def __init__(self, data_features: Matrix, output_samples: ColumnVector, cross_validation_folds: int,\n n_alphas: int = -1):\n r\"\"\"\n A solver for Linear-Regression, based on QR-decomposition.\n\n Args:\n data_features(Matrix): The input data matrix :math:`n \\times d`.\n output_samples(ColumnVector): The output for the given inputs, :math:`n \\times 1`.\n n_alphas(int): The number of total regularization terms which will be tested by this solver.\n cross_validation_folds(int): The number of cross-validation folds used in this solver.\n\n \"\"\"\n super(_QRSolver, self).__init__(data_features, output_samples, n_alphas, cross_validation_folds)\n self._model = None\n\n def fit(self) -> ColumnVector:\n \"\"\"\n The method which fits the requested model to the given data.\n \"\"\"\n q, r = qr(self._data_features)\n self._fitted_coefficients = solve_triangular(r, q.T.dot(self._output_samples), lower=False, check_finite=False)\n return self._fitted_coefficients\n\n\nclass _NormalEquationsSolver(BaseSolver):\n @ex.capture\n def __init__(self, data_features: Matrix, output_samples: ColumnVector, cross_validation_folds: int,\n n_alphas: int = -1):\n r\"\"\"\n A solver for Linear-Regression, based on solving the Normal-Equations.\n\n Args:\n data_features(Matrix): The input data matrix :math:`n \\times d`.\n output_samples(ColumnVector): The output for the given inputs, :math:`n \\times 1`.\n n_alphas(int): The number of total regularization terms which will be tested by this solver.\n cross_validation_folds(int): The number of cross-validation folds used in this solver.\n\n \"\"\"\n super(_NormalEquationsSolver, self).__init__(data_features, output_samples, n_alphas, cross_validation_folds)\n self._model = None\n\n def fit(self) -> ColumnVector:\n \"\"\"\n The method which fits the requested model to the given data.\n \"\"\"\n gram: Matrix = self._data_features.T.dot(self._data_features)\n self._fitted_coefficients = self._data_features.T.dot(self._output_samples)\n self._fitted_coefficients = solve(gram, self._fitted_coefficients, overwrite_a=True, overwrite_b=True,\n check_finite=False, assume_a=\"sym\")\n return self._fitted_coefficients\n\n\nclass _SketchPreconditionerSolver(BaseSolver):\n @ex.capture\n def __init__(self, data_features: Matrix, output_samples: ColumnVector, cross_validation_folds: int, _seed,\n n_alphas: int = -1):\n r\"\"\"\n A solver for Linear-Regression, based on solving the Normal-Equations.\n\n Args:\n data_features(Matrix): The input data matrix :math:`n \\times d`.\n output_samples(ColumnVector): The output for 
the given inputs, :math:`n \\times 1`.\n n_alphas(int): The number of total regularization terms which will be tested by this solver.\n cross_validation_folds(int): The number of cross-validation folds used in this solver.\n\n \"\"\"\n super(_SketchPreconditionerSolver, self).__init__(data_features, output_samples, n_alphas,\n cross_validation_folds)\n self._model = None\n self._seed = _seed\n\n @ex.capture(prefix=\"sketch_preconditioned_config\")\n def fit(self, sampled_rows: float, switch_sign_probability: float, min_sampled_rows: float) -> ColumnVector:\n \"\"\"\n The method which fits the requested model to the given data.\n \"\"\"\n num_of_rows: int = max(int(sampled_rows * len(self._data_features)), int(min_sampled_rows))\n _, R = qr(generate_sketch_preconditioner(self._data_features, num_of_rows, np.empty_like(self._data_features),\n self._seed, switch_sign_probability))\n partial_solution: ColumnVector = lsqr(self._data_features.dot(inv(R)), self._output_samples,\n atol=1e-15, btol=1e-15)[0]\n self._fitted_coefficients = solve_triangular(R, partial_solution, lower=False, check_finite=False)\n return self._fitted_coefficients\n\n\nclass _SketchInverseSolver(BaseSolver):\n @ex.capture\n def __init__(self, data_features: Matrix, output_samples: ColumnVector, cross_validation_folds: int,\n n_alphas: int = -1):\n r\"\"\"\n A solver for Linear-Regression, based on boosting the algorithm which solves the Normal-Equations,\n using fast Caratheodory method.\n\n Args:\n data_features(Matrix): The input data matrix :math:`n \\times d`.\n output_samples(ColumnVector): The output for the given inputs, :math:`n \\times 1`.\n n_alphas(int): The number of total regularization terms which will be tested by this solver.\n cross_validation_folds(int): The number of cross-validation folds used in this solver.\n\n \"\"\"\n super(_SketchInverseSolver, self).__init__(data_features, output_samples, -1, cross_validation_folds)\n self._model = None\n\n @ex.capture\n def fit(self, clusters_count, is_positive_definite: bool = False) -> ColumnVector:\n \"\"\"\n The method which fits the requested model to the given data.\n \"\"\"\n coreset: Matrix = create_coreset_fast_caratheodory(self._data_features, clusters_count)\n adapted_data, adapted_output, outputs_sum = self._preprocess_data()\n weights, chosen_indices = fast_caratheodory_set_python(adapted_data.T, adapted_output, clusters_count)\n a_times_outputs: ColumnVector = outputs_sum * adapted_data[chosen_indices, :].T.dot(weights)\n self._fitted_coefficients = solve(coreset.T.dot(coreset), a_times_outputs, overwrite_a=True, overwrite_b=True,\n check_finite=False, assume_a=\"pos\" if is_positive_definite else \"sym\")\n return self._fitted_coefficients\n\n def _preprocess_data(self):\n data_copy: Matrix = self._data_features.copy()\n output_copy: ColumnVector = self._output_samples.copy()\n negative_indices: ColumnVector = np.argwhere(self._output_samples < 0)\n data_copy[negative_indices, :] *= -1\n output_copy[negative_indices] *= -1\n output_sum = np.sum(output_copy)\n return data_copy, output_copy/output_sum, output_sum\n\n\n_sketch_cholesky_linear_regression: Callable = cholesky_booster(_SVDSolver)\n_caratheodory_booster_linear_regression: Callable = caratheodory_booster(_SVDSolver, perform_normalization=False)\n\n# A private dictionary used for creating the solvers factory :func:`get_method`.\n_linear_regressions_methods: Dict[str, Callable] = {\n LinearRegressionMethods.SVDBased: _SVDSolver,\n LinearRegressionMethods.QRBased: _QRSolver,\n 
LinearRegressionMethods.NormalEquationsBased: _NormalEquationsSolver,\n LinearRegressionMethods.SketchAndCholesky: _sketch_cholesky_linear_regression,\n LinearRegressionMethods.BoostedSVDSolver: _caratheodory_booster_linear_regression,\n LinearRegressionMethods.SketchAndInverse: _SketchInverseSolver,\n LinearRegressionMethods.SketchPreconditioned: _SketchPreconditionerSolver\n}\n\n# A factory which creates the requested linear-regression solvers.\nget_method: Callable = create_factory(_linear_regressions_methods, are_methods=True)\n", "sub_path": "ComparedAlgorithms/linear_regression.py", "file_name": "linear_regression.py", "file_ext": "py", "file_size_in_byte": 9565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "ComparedAlgorithms.base_least_square_solver.BaseSolver", "line_number": 27, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 29, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 29, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.linalg.lstsq", "line_number": 48, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 44, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.base_least_square_solver.BaseSolver", "line_number": 52, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 54, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 54, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.linalg.qr", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.linalg.solve_triangular", "line_number": 74, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 69, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.base_least_square_solver.BaseSolver", "line_number": 78, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 80, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 80, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 79, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 79, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 99, "usage_type": "name"}, {"api_name": "scipy.linalg.solve", "line_number": 101, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 95, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.base_least_square_solver.BaseSolver", "line_number": 106, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 108, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 108, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 107, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.linalg.qr", "line_number": 131, "usage_type": "call"}, {"api_name": "ComparedAlgorithms.sketch_preconditioner.generate_sketch_preconditioner", "line_number": 131, "usage_type": "call"}, 
{"api_name": "numpy.empty_like", "line_number": 131, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 133, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg.lsqr", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 133, "usage_type": "call"}, {"api_name": "scipy.linalg.solve_triangular", "line_number": 135, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 125, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ex", "line_number": 125, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 126, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.base_least_square_solver.BaseSolver", "line_number": 139, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 141, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 141, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 140, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 140, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 162, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.method_boosters.create_coreset_fast_caratheodory", "line_number": 162, "usage_type": "call"}, {"api_name": "ComparedAlgorithms.method_boosters.fast_caratheodory_set_python", "line_number": 164, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 165, "usage_type": "name"}, {"api_name": "scipy.linalg.solve", "line_number": 166, "usage_type": "call"}, {"api_name": "Infrastructure.utils.ex.capture", "line_number": 157, "usage_type": "attribute"}, {"api_name": "Infrastructure.utils.ex", "line_number": 157, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 158, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Matrix", "line_number": 171, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 172, "usage_type": "name"}, {"api_name": "Infrastructure.utils.ColumnVector", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.argwhere", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 176, "usage_type": "call"}, {"api_name": "Infrastructure.utils.Callable", "line_number": 180, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.method_boosters.cholesky_booster", "line_number": 180, "usage_type": "call"}, {"api_name": "Infrastructure.utils.Callable", "line_number": 181, "usage_type": "name"}, {"api_name": "ComparedAlgorithms.method_boosters.caratheodory_booster", "line_number": 181, "usage_type": "call"}, {"api_name": "Infrastructure.utils.Dict", "line_number": 184, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Callable", "line_number": 184, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.SVDBased", "line_number": 185, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 185, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.QRBased", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 186, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.NormalEquationsBased", "line_number": 187, "usage_type": "attribute"}, {"api_name": 
"Infrastructure.enums.LinearRegressionMethods", "line_number": 187, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.SketchAndCholesky", "line_number": 188, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 188, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.BoostedSVDSolver", "line_number": 189, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 189, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.SketchAndInverse", "line_number": 190, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 190, "usage_type": "name"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods.SketchPreconditioned", "line_number": 191, "usage_type": "attribute"}, {"api_name": "Infrastructure.enums.LinearRegressionMethods", "line_number": 191, "usage_type": "name"}, {"api_name": "Infrastructure.utils.Callable", "line_number": 195, "usage_type": "name"}, {"api_name": "Infrastructure.utils.create_factory", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "600472129", "text": "import argparse\nfrom random import random\nfrom sklearn.datasets import load_svmlight_file\nfrom sklearn.externals import joblib\n\nDESCRIPTOR = \"pcd\"\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description = \"Random Epitope Binding Predictor\")\n parser.add_argument('-input', metavar='in-file')\n parser.add_argument('-encoded_input', metavar='encoded-in-file')\n parser.add_argument('-output', metavar='out-file')\n args = parser.parse_args()\n\n # read peptides\n peptides = [line.rstrip('\\n') for line in open(args.input)]\n\n # load data\n data = load_svmlight_file(args.encoded_input)[0]\n\n # load trained classifier\n clf = joblib.load(\"clf_\" + DESCRIPTOR + \".pkl\")\n\n # predict given data\n predictions = clf.predict(data)\n\n # write peptides and their predictions to file\n out_file = open(args.output, 'w')\n for peptide, prediction in zip(peptides, predictions):\n out_file.write(peptide + \"\\t\" + str(int(prediction)) + \"\\n\")\n out_file.close()\n", "sub_path": "my_predictor_pcd.py", "file_name": "my_predictor_pcd.py", "file_ext": "py", "file_size_in_byte": 1010, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_svmlight_file", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "50893939", "text": "#!/usr/bin/env python\nimport os\nfrom setuptools import find_packages, setup\nimport warnings\n\n\ndef parse_requirements(filename):\n \"\"\" Parse a requirements file ignoring comments and -r inclusions of other files \"\"\"\n reqs = []\n with open(filename, 'r') as f:\n for line in f:\n hash_idx = line.find('#')\n if hash_idx >= 0:\n line = line[:hash_idx]\n line = line.strip()\n if line:\n reqs.append(line)\n return reqs\n\n\nwith open('README.md', 'r') as f:\n readme = f.read().strip()\n\n\nsetup(\n name=\"SUSO\",\n version='0.0.1',\n url=\"https://github.com/thelabdc/OVSJG-SUSO\",\n author=\"The Lab @ DC\",\n author_email=\"the.lab@dc.gov\",\n license=\"Proprietary\",\n 
packages=find_packages(),\n include_package_data=True,\n install_requires=parse_requirements('requirements.txt'),\n tests_require=parse_requirements('requirements.testing.txt'),\n description=\"Tools for the OVSJG SUSO project\",\n entry_points={\n 'console_scripts': ['susocli=suso.cli:cli']\n },\n long_description=\"\\n\" + readme\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1020, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 25, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "604297791", "text": "# -*- coding: utf-8 -*-\nimport pytest\nfrom data.add_group import constant as testdata\nfrom model.group import Group\n\n\n@pytest.mark.parametrize(\"group\", testdata, ids=[repr(x) for x in testdata])\ndef test_add_group(app, group):\n old_groups = app.group.get_group_list()\n app.group.create(group)\n assert len(old_groups) +1 ==app.group.count()\n new_groups = app.group.get_group_list()\n old_groups.append(group)\n assert sorted (old_groups, key=Group.id_or_max)==sorted(new_groups, key=Group.id_or_max)\n\n\n", "sub_path": "test/test_add_group.py", "file_name": "test_add_group.py", "file_ext": "py", "file_size_in_byte": 518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "model.group.Group.id_or_max", "line_number": 14, "usage_type": "attribute"}, {"api_name": "model.group.Group", "line_number": 14, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 7, "usage_type": "call"}, {"api_name": "data.add_group.constant", "line_number": 7, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}]} +{"seq_id": "96646623", "text": "from flask import Flask, jsonify\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n\n#################################################\n# Flask Setup\n#################################################\n\napp = Flask(__name__)\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List of all available API routes\"\"\"\n return (\n \"Welcome to the Hawaiian weather API!
<br/>\"\n        \"Available Routes:<br/>\"\n        \"/api/v1.0/precipitation<br/>\"\n        \"/api/v1.0/stations<br/>\"\n        \"/api/v1.0/tobs<br/>\"\n        \"For the following two calls, dates (start and end) must be in %Y-%m-%d format<br/>\"\n        \"/api/v1.0/start<br/>
\"\n \"/api/v1.0/start/end\"\n )\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef prcp():\n \"\"\"Query for the dates and temperature observations from the last year.\n Convert the query results to a Dictionary using date as the key and tobs as the value.\n Return the JSON representation of your dictionary.\"\"\"\n\n # Define the datetime one year prior to the max date in the dataset\n year_prior = dt.datetime.strptime('2016-08-23', '%Y-%m-%d')\n \n # Query for the prior year's worth of temps\n results = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_prior).all()\n\n # Create a dictionary from the row data and create it from the query result\n temp_dict = {}\n for temp in results:\n temp_dict[temp.date] = temp.tobs\n \n return jsonify(temp_dict)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef station():\n \"\"\"Return a JSON list of stations from the dataset.\"\"\"\n\n # Pull all stations and station names from Station\n results = session.query(Station.name).all()\n \n # Create a list to track our results to eventually turn into a JSON\n station_list = []\n \n # Loop through the query results and add the station name to the list\n for x in results:\n station_list.append(x.name)\n\n return jsonify(station_list)\n\n\n@app.route(\"/api/v1.0/tobs\")\ndef temps():\n \"\"\"Return a JSON list of Temperature Observations (tobs) for the previous year\"\"\"\n\n # Define the datetime one year prior to the max date in the dataset\n year_prior = dt.datetime.strptime('2016-08-23', '%Y-%m-%d')\n \n # Query for all temperature observations in the last year of the dataset\n results = session.query(Measurement.tobs).filter(Measurement.date >= year_prior).all()\n \n tobs_list = []\n for x in results:\n tobs_list.append(x.tobs)\n\n return jsonify(tobs_list)\n\n\n@app.route(\"/api/v1.0/\")\ndef start_dt(start):\n \"\"\"Return a JSON list of the minimum temperature, the average temperature,\n and the max temperature for a given start range i.e. 
for all dates greater than or equal to the start date.\n If the date requested is above the maximum date, return a 404 error.\"\"\"\n\n # Define the datetime one year prior to the max date in the dataset\n try:\n start_dttm = dt.datetime.strptime(start, '%Y-%m-%d')\n except ValueError:\n return jsonify({\"error\": \"dates must be in %Y-%m-%d format\"}), 404 \n \n # Query for all temperature observations from start date forward\n results = session.query(Measurement.tobs).filter(Measurement.date >= start_dttm).all()\n \n tobs_list = []\n for x in results:\n tobs_list.append(x.tobs)\n \n if tobs_list != []:\n result_dict = {\n 'TMIN': min(tobs_list),\n 'TMAX': max(tobs_list),\n 'TAVG': np.mean(tobs_list)\n }\n\n return jsonify(result_dict)\n \n # If the date is past the max date in the dataset, produce a 404 error\n return jsonify({\"error\": \"start date is past the maximum date in the dataset\"}), 404\n\n\n@app.route(\"/api/v1.0//\")\ndef date_range(start, end):\n \"\"\"Return a JSON list of the minimum temperature, the average temperature,\n and the max temperature for dates between the start and end date inclusive.\n If the start date requested is above the maximum date, return a 404 error.\n If the end date is less than the start date, return a 404 error.\"\"\"\n\n # Define the datetime one year prior to the max date in the dataset\n try: \n start_dttm = dt.datetime.strptime(start, '%Y-%m-%d')\n end_dttm = dt.datetime.strptime(end, '%Y-%m-%d')\n except ValueError:\n return jsonify({\"error\": \"dates must be in %Y-%m-%d format\"}), 404\n \n if start_dttm > end_dttm:\n return jsonify({\"error\": \"start date cannot be greater than end date\"}), 404\n \n # Query for all temperature observations from start date forward\n results = session.query(Measurement.tobs).filter(Measurement.date >= start_dttm).filter(Measurement.date <= end_dttm).all()\n \n tobs_list = []\n for x in results:\n tobs_list.append(x.tobs)\n \n if tobs_list != []:\n result_dict = {\n 'TMIN': min(tobs_list),\n 'TMAX': max(tobs_list),\n 'TAVG': np.mean(tobs_list)\n }\n\n return jsonify(result_dict)\n \n # If the date is past the max date in the dataset, produce a 404 error\n return jsonify({\"error\": \"chosen date is past the maximum date in the dataset\"}), 404\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.automap.automap_base", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 117, 
"usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "648335813", "text": "\nimport z \ndates = z.getp(\"dates\")\nprint(\"dates : {}\".format( len(dates )))\nsdate = \"2013-01-02\"\ndidx = dates.index(sdate)\nprint(\"didx : {}\".format( didx ))\nlens = len(dates)\nfor sdate in range(-1*(lens-didx),-252):\n edate = sdate + 252\n print(\"date : {}\".format( dates[sdate] ))\n print(\"date : {}\".format( dates[edate] ))\n raise SystemExit\n\nfrom collections import defaultdict, deque\nfrom sortedcontainers import SortedSet\nimport random\nimport zen\nimport csv\nimport os\nstocks = zen.getLongEtfList()\n\n#problems = z.getp(\"etfproblems\")\ndics = defaultdict(dict)\ndef csvToDic(directory = \"historical\"):\n path = z.getPath(directory) \n listOfFiles = os.listdir(path)\n for idx,entry in enumerate(listOfFiles):\n if not idx % 100:\n print(\"idx: {}\".format( idx))\n \n astock = os.path.splitext(entry)[0]\n path = z.getPath(\"{}/{}\".format(directory, entry))\n for row in csv.DictReader(open(path)):\n date = row['Date']\n dics[astock][date] = float(row['Close'])\n# z.setp(dics, \"bigdic\")\n# z.setp(dics, \"bigdic\")\n#csvToDic(directory=\"ETF\")\n#raise SystemExit\ndics = z.getp(\"bigdic\")\n\n#dics = z.getp(\"BUY2_P\")\ntestpoints = 5000\n\ndates = z.getp(\"dates\")\nnum_days = len(dates)\nendi = (num_days-252)-1\nstarti = dates.index(\"2014-01-02\")\n\nvals = defaultdict(list)\nnegs = defaultdict(int)\nproblems = set()\n\nstocks = dics.keys()\n#for astock in dics.keys():\n# rank = zen.getMCRank(astock)\n# if rank == \"NA\":\n# continue\n# if rank < 1200:\n# stocks.append(astock)\nsdate = \"2013-01-02\"\ndidx = dates.index(sdate)\nfor test in range(testpoints):\n if not test % 100:\n print(\"test : {}\".format( test ))\n first = random.randrange(starti, endi)\n second = first + 252\n fd = dates[first]\n# print(\"fd : {}\".format( fd ))\n sd = dates[second]\n# print(\"sd : {}\".format( sd ))\n for astock in stocks:\n if astock in problems:\n continue\n# print(\"astock : {}\".format( astock ))\n try:\n\n first = dics[astock][fd]\n change = round(dics[astock][sd] / first,4)\n# print(\"change : {}\".format( change ))\n except Exception as e:\n# print(\"dics: {}\".format( dics[astock]))\n# print(\"first : {}\".format( first ))\n# print(\"astock: {}\".format( astock))\n# z.trace(e)\n problems.add(astock)\n# exit()\n continue\n\n if change < 1.00:\n negs[astock] += 1\n\n vals[astock].append(change)\n#print(\"problems : {}\".format( problems ))\n\n#z.setp(problems, \"etfproblems\")\n\n#ss = SortedSet()\n#for key,value in vals.items():\n# avg 
= z.avg(value)\n# ss.add((avg, key))\n#\n#save = ss[-15:]\n#z.setp(save, \"ranketf2\")\n#print(ss[-15:])\n#for item in ss[-15:]:\n# try:\n# print (item[1], round(negs[item[1]]/testpoints,3))\n# except:\n# pass\n\nimport statistics\nmedian = SortedSet()\nlowest = SortedSet()\nlowestdic = dict()\n#yearlydic = z.getp(\"yearlydic\")\nfor key,value in vals.items():\n if key in problems:\n continue\n y1m = statistics.median(value)\n y1w = min(value)\n lowest.add((y1w, key))\n median.add((y1m, key))\n# yearlydic[key] = (y1w, y1m)\n\n#z.setp(lowestdic,\"lowestdic\")\n#z.setp(yearlydic,\"yearlydic\")\nprint(\"lowest: {}\".format( lowest[-20:]))\nprint(\"median: {}\".format( median[-20:]))\n\nz.setp(lowest[-20:],\"lowyear\")\n\n#path = z.getPath(\"analysis/etfanalysis.csv\")\n#with open(path, \"w\") as f:\n# for item in ss:\n# f.write(\"{},{}\\n\".format(item[1], item[0]))\n\n#save = ss[-15:]\n#z.setp(save, \"ranketf2\")\n#print(ss[-200:])\ndef saveranketf():\n yearly = list()\n ss2 = SortedSet()\n import util\n \n for item in ss:\n # print (item, round(\n print (item, round(negs[item[1]]/testpoints,3))\n \n try:\n etfc = float(util.getEtfQualifications(item[1], count=True))/10\n print(\"etfc : {}\".format( etfc ))\n except:\n etfc = 1\n \n percent = negs[item[1]]/testpoints\n score = item[0] - (2*percent) - etfc\n ss2.add((score, item[1]))\n \n print(\"ss2: {}\".format( ss2[-5:]))\n z.setp(ss2[-5:], \"ranketf\")\n\n#saveranketf()\n# try:\n# print (\n# item[1]\n# , round(negs[item[1]]/testpoints,3))\n# except:\n# pass\n\n#print(ss[:10])\n\n\n\n", "sub_path": "python/old/ranketf.py", "file_name": "ranketf.py", "file_ext": "py", "file_size_in_byte": 4361, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "z.getp", "line_number": 3, "usage_type": "call"}, {"api_name": "zen.getLongEtfList", "line_number": 21, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 24, "usage_type": "call"}, {"api_name": "z.getPath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "z.getPath", "line_number": 33, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 34, "usage_type": "call"}, {"api_name": "z.getp", "line_number": 41, "usage_type": "call"}, {"api_name": "z.getp", "line_number": 46, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 52, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 67, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedSet", "line_number": 114, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedSet", "line_number": 115, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 121, "usage_type": "call"}, {"api_name": "z.setp", "line_number": 132, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedSet", "line_number": 144, "usage_type": "call"}, {"api_name": "util.getEtfQualifications", "line_number": 152, "usage_type": "call"}, {"api_name": "z.setp", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "131520736", "text": "# coding:utf-8\n\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Category, Product\nfrom .forms import ProductAddToCartForm\n\ndef 
index(request, template_name):\n page_title = \"产品分类目录-小白购\"\n request.session['name']='hello'\n return render(request, template_name, locals())\n\ndef show_category(request, category_slug, template_name):\n c = get_object_or_404(Category, slug=category_slug)\n product = c.product_set.all()\n page_title = c.name\n meta_keywords = c.meta_keywords\n meta_description = c.description\n return render(request, template_name, locals())\n\ndef show_product(request, product_slug, template_name):\n p = get_object_or_404(Product, slug=product_slug)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n form = ProductAddToCartForm()\n return render(request, template_name, locals())\n\n\n\n\n\n\n", "sub_path": "shopsys/apps/catalog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 21, "usage_type": "argument"}, {"api_name": "forms.ProductAddToCartForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "18492428", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\nfrom UI.UIgroupeEdit import Ui_Form\n\nclass F_Groupe(QtWidgets.QWidget, Ui_Form):\n \n def __init__(self, xmlFile, model, parent=None):\n super(F_Groupe, self).__init__(parent)\n self.model = model\n\n ## on abonne la fenetre au model pour quel recoive les modifications\n self.model.get(\"stations\").abonne(self)\n self.model.get(\"parametres\").abonne(self)\n\n self.listCur = None\n self.pere = None\n \n self.setupUi(self)\n\n ## on connecte les evenement a leur fonction\n self.buttonEdit.clicked.connect(self.buttonEdit_clicked)\n self.listViewCur.clicked.connect(self.listViewCur_clicked)\n self.listViewEle.clicked.connect(self.listViewEle_clicked)\n\n## fonction appele quand les models sont modifiés\n def refresh(self):\n \n ## on regarde si la liste courante a ete initialise\n if(self.listCur!=None):\n modelS = self.model.get(\"stations\")\n modelP = self.model.get(\"parametres\")\n self.listNomCur = []\n self.listNomEle = []\n \n ## on recupere la liste element (stations ou parametres)\n if(self.listCur[0].getStatut()==\"station\"):\n self.listEle = modelS.getListStation()\n \n elif(self.listCur[0].getStatut()==\"parametre\"):\n self.listEle = modelP.getListParametre()\n \n ## on recupere la liste de nom de la liste courante \n for elt in self.listCur:\n self.listNomCur.append(elt.getNom())\n\n ## on recupere la liste de nom de la liste element\n for elt in self.listEle:\n self.listNomEle.append(elt.getNom())\n \n self.init_table()\n \n def init_table(self):\n ## on cree les models des listView\n self.modelCur = QtCore.QStringListModel()\n self.modelEle = QtCore.QStringListModel()\n\n ## on lie les models aux listView\n self.listViewCur.setModel(self.modelCur)\n self.listViewEle.setModel(self.modelEle)\n\n #on ajoute les liste dans les models des 
listView\n self.modelCur.setStringList(self.listNomCur)\n self.modelEle.setStringList(self.listNomEle)\n\n #on bloque l'edition dans les listes view\n self.listViewCur.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.listViewEle.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n\n\n def listViewCur_clicked(self ,item):\n modelS = self.model.get(\"stations\")\n modelP = self.model.get(\"parametres\")\n\n ## on recupere le nom de l'item selectionne (item.data() change suite au removeRow)\n nomItem = item.data()\n\n ## on verifie que l'on a au moins un item dans la liste courante\n if(self.modelCur.rowCount()>1):\n \n ## on supprime l'item selectionne\n self.modelCur.removeRow(item.row())\n\n ## on recupere l'element selectionne\n elt = modelS.getStation(nomItem)\n \n if(elt==None):\n elt = modelP.getParametre(nomItem)\n\n ## on enleve l'element selectionne de la liste courante\n self.listCur.pop(self.listCur.index(elt))\n\n def listViewEle_clicked(self, item):\n modelS = self.model.get(\"stations\")\n modelP = self.model.get(\"parametres\")\n\n ## on recupere la liste courante \n liste = self.modelCur.stringList()\n\n ## on verifie que l'element ne soit pas deja dans la liste du model\n if((item.data() in liste)==0):\n \n ## on ajoute l'item dans la liste\n liste.append(item.data()) \n\n ## on recupere l'element selectionne\n elt = modelS.getStation(item.data())\n \n if(elt==None):\n elt = modelP.getParametre(item.data())\n\n ## on ajoute l'element selectionne de la liste courante\n self.listCur.append(elt)\n\n ## on change la liste du model \n self.modelCur.setStringList(liste)\n\n def buttonEdit_clicked(self):\n modelG = self.model.get(\"groupes\")\n modelS = self.model.get(\"stations\")\n modelP = self.model.get(\"parametres\")\n\n ## on recupere le statut (station ou parametre)\n e = self.listCur[0]\n statut = e.getStatut()\n\n ## si pere n'est pas nul on modifie donc un groupe\n if(self.pere!=None):\n if (statut == \"station\"):\n ## on recupere les elements de la liste courante initiale et la liste des elements de la nouvel liste\n listOldFils = modelS.getListStation_Noms(self.listNomCur)\n \n elif (statut == \"parametre\"):\n listOldFils = modelP.getListParametre_Noms(self.listNomCur)\n\n listFils = self.listCur\n\n ## on change le fils du groupe\n modelG.setFils(self.pere,listOldFils,listFils)\n self.close()\n \n else:\n ## on modifie le nom d'une groupe donc on verifie qu'il n'y ai qu'un element\n if(len(self.modelCur.stringList())==1):\n\n ## on recupere l'element qui correspond au nouveau pere\n nomPere = self.modelCur.stringList()[0]\n if (statut == \"station\"):\n newPere = modelS.getStation(nomPere)\n \n elif (statut == \"parametre\"):\n newPere = modelP.getParametre(nomPere)\n\n ## on change le pere du groupe\n modelG.setPere(self.oldPere,newPere)\n self.close()\n \n else:\n messageBox = QtWidgets.QMessageBox()\n messageBox.setWindowTitle(\"Erreur\")\n messageBox.setText(\"Vous devez avoir uniquement un nom.\")\n messageBox.exec()\n\n def ouvrir(self, listCur , pere):\n ## on initialise les attributs de la classe\n self.listCur = listCur\n self.pere = pere\n\n ## on recupere l'ancien nom de groupe si l'on modifie le nom du groupe\n if (self.pere == None):\n self.oldPere = self.listCur[0]\n \n self.refresh()\n self.show()\n \n\n \n", "sub_path": "Python/Fenetres/GroupeEdit.py", "file_name": "GroupeEdit.py", "file_ext": "py", "file_size_in_byte": 6340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"51", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 4, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 4, "usage_type": "name"}, {"api_name": "UI.UIgroupeEdit.Ui_Form", "line_number": 4, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QStringListModel", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QStringListModel", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 157, "usage_type": "name"}]} +{"seq_id": "584894996", "text": "from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import CommentViewSet, ReviewViewSet\n\nrouter = DefaultRouter()\n\nrouter.register(r'', ReviewViewSet, basename='Reviews')\nrouter.register(\n r'(?P\\d+)/comments',\n CommentViewSet,\n basename='Comments'\n)\n\nurlpatterns = [\n path('', include(router.urls)),\n\n]\n", "sub_path": "reviews/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "views.ReviewViewSet", "line_number": 8, "usage_type": "argument"}, {"api_name": "views.CommentViewSet", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "86083909", "text": "import cherrypy\nimport os\nimport sys\n\nclass Server(object):\n\n def __init__(self, options):\n # First, figure out where we are.\n self.base_dir = os.path.normpath(os.path.abspath(options.basedir))\n\n # Configure our server.\n self.conf_path = os.path.join(self.base_dir, \"conf\")\n cherrypy.config.update(os.path.join(self.conf_path, \"server.conf\"))\n\n # All python to see modulues we have written\n sys.path.insert(0, self.base_dir)\n\n # This is a hack to make cherrypy work on heroku. Ugh.\n from cherrypy.process import servers\n def fake_wait_for_occupied_port(host, port): return\n servers.wait_for_occupied_port = fake_wait_for_occupied_port\n\n # Initialize our global state. 
This will eventually be a database.\n from api import global_state\n global_state.init()\n\n # Initialize the API\n from api.state import State\n cherrypy.tree.mount(State(), '/api/state', {\n '/': {\n 'request.dispatch': cherrypy.dispatch.MethodDispatcher()\n }\n })\n\n # Initialize the custom arduino endpoint(s)\n from api.arduino import ArduinoInterface\n cherrypy.tree.mount(ArduinoInterface(), '/arduino', {})\n\n # Initialize all our static content.\n cherrypy.tree.mount(None, '/',\n os.path.join(self.conf_path, 'app.conf'))\n\n def run(self):\n cherrypy.engine.start();\n cherrypy.engine.block();\n\nif __name__ == '__main__':\n\n from optparse import OptionParser\n\n def parse_cl():\n curdir = os.path.normpath(os.path.abspath(os.path.curdir))\n\n parser = OptionParser()\n parser.add_option('-b', '--base-dir', dest='basedir',\n help='Base directory from which the server is'\\\n 'launched (default: %s)' % curdir)\n parser.set_defaults(basedir=curdir)\n (options, args) = parser.parse_args()\n\n return options\n \n Server(parse_cl()).run()\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.normpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cherrypy.config.update", "line_number": 13, "usage_type": "call"}, {"api_name": "cherrypy.config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cherrypy.process.servers.wait_for_occupied_port", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cherrypy.process.servers", "line_number": 21, "usage_type": "name"}, {"api_name": "api.global_state.init", "line_number": 25, "usage_type": "call"}, {"api_name": "api.global_state", "line_number": 25, "usage_type": "name"}, {"api_name": "cherrypy.tree.mount", "line_number": 29, "usage_type": "call"}, {"api_name": "cherrypy.tree", "line_number": 29, "usage_type": "attribute"}, {"api_name": "api.state.State", "line_number": 29, "usage_type": "call"}, {"api_name": "cherrypy.dispatch.MethodDispatcher", "line_number": 31, "usage_type": "call"}, {"api_name": "cherrypy.dispatch", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cherrypy.tree.mount", "line_number": 37, "usage_type": "call"}, {"api_name": "cherrypy.tree", "line_number": 37, "usage_type": "attribute"}, {"api_name": "api.arduino.ArduinoInterface", "line_number": 37, "usage_type": "call"}, {"api_name": "cherrypy.tree.mount", "line_number": 40, "usage_type": "call"}, {"api_name": "cherrypy.tree", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cherrypy.engine.start", "line_number": 44, "usage_type": "call"}, {"api_name": "cherrypy.engine", "line_number": 44, "usage_type": "attribute"}, {"api_name": 
"cherrypy.engine.block", "line_number": 45, "usage_type": "call"}, {"api_name": "cherrypy.engine", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 54, "usage_type": "call"}, {"api_name": "{'servers': 'cherrypy.process.servers', 'global_state': 'api.global_state', 'State': 'api.state.State', 'ArduinoInterface': 'api.arduino.ArduinoInterface'}", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "638559111", "text": "import flask\nfrom flask import request, jsonify\n\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\nbooks = [\n {'id': 0,\n 'title': 'A Fire Upon the Deep',\n 'author': 'Vernor Vinge',\n 'first_sentence': 'The coldsleep itself was dreamless.',\n 'year_published': '1992'},\n {'id': 1,\n 'title': 'The Ones Who Walk Away From Omelas',\n 'author': 'Ursula K. Le Guin',\n 'first_sentence': 'With a clamor of bells that set the swallows soaring, the Festival of Summer came to the city Omelas, bright-towered by the sea.',\n 'published': '1973'},\n {'id': 2,\n 'title': 'Dhalgren',\n 'author': 'Samuel R. Delany',\n 'first_sentence': 'to wound the autumnal city.',\n 'published': '1975'}\n]\n\n@app.route('/', methods=['GET'])\ndef home():\n return '''
<h1>SF section</h1>\n<p>A prototype API.</p>
'''\n\n@app.route('/api/v1/resources/books/all', methods=['GET'])\ndef api_all():\n return jsonify(books)\n\n@app.route('/api/v1/resources/books', methods=['POST'])\ndef api_post():\n if not request.json or not 'title' in request.json:\n print('400')\n book = {\n 'id': books[-1]['id'] + 1,\n 'title': request.json['title'],\n 'author': request.json['author'],\n 'first_sentence': request.json['first_sentence'],\n 'published':request.json['published']\n }\n\n books.append(book)\n return jsonify({'book': book}), 201\n\napp.run()\n\n\n\n\n\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "478191134", "text": "import logging\nimport time\nimport urllib.parse as urlparse\nimport uuid\nfrom datetime import datetime\n\nimport wechatpy\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.timezone import now\nfrom rest_framework import status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom wechatpy import parse_message\nfrom wechatpy.crypto import WeChatCrypto\nfrom wechatpy.exceptions import InvalidSignatureException, InvalidAppIdException\nfrom wechatpy.utils import check_signature\n\nfrom car.models import Car\nfrom customer.rest.apis import current_reserve_car_ids\nfrom operation.models import Customer, User, QrcodeAttention, Member, WxRecoRela, ZerocarToken\nfrom wechat.helpers import get_wechat_oauth\nfrom wechat.tasks import send_msg\nfrom wechat.utils import devide_chunks\nfrom zerocar.utils import OpenidToken, current_timestamp\n\nlogger = logging.getLogger('zerocar.WECHAT')\n\n\ndef auth(request):\n \"\"\"\n 授权完成后,最终跳转至success_redirect_uri\n 指定的url,如果用户已经注册,最终query string里面会带有有token,否则是openid\n \"\"\"\n next = request.GET.get('next', settings.WECHAT_OAUTH_SUCCESS_DEFAULT_REDIRECT_URI)\n if request.META['HTTP_HOST'] == 'api.izerocar.com':\n success_redirect_uri = request.GET.get('success_redirect_uri', 'https://wx.izerocar.com/')\n else:\n success_redirect_uri = request.GET.get('success_redirect_uri',\n settings.WECHAT_OAUTH_SUCCESS_DEFAULT_REDIRECT_URI)\n\n params = {'next': next, 'success_redirect_uri': success_redirect_uri}\n redirect_uri = '{}?{}'.format(\n 
request.build_absolute_uri(reverse('wechat-auth-redirect-handler')),\n urlparse.urlencode(params))\n\n # 绝对路径最好是能可配的,写死了不太友好\n if settings.ENVIRONMENT == 'DEVELOPMENT' and settings.ROOT_URL:\n redirect_uri = '{}?{}'.format(\n '%s%s' % (settings.ROOT_URL, reverse('wechat-auth-redirect-handler')),\n urlparse.urlencode(params))\n\n wechat_oauth = wechatpy.oauth.WeChatOAuth(\n app_id=settings.WECHAT_APP_ID,\n secret=settings.WECHAT_APP_SECRET,\n redirect_uri=redirect_uri,\n scope=settings.WECHAT_OAUTH_SCOPE)\n\n return HttpResponseRedirect(wechat_oauth.authorize_url)\n\n\ndef auth_redirect_handler(request):\n \"\"\"\n 微信网页授权 oauth 的第二步, 这个可以根据业务需要,把用户重定向到不同(版本)的 网页前端。\n \"\"\"\n wechat_oauth = get_wechat_oauth()\n code = request.GET['code']\n logger.debug('wechat auth code {}'.format(code))\n access_token_data = wechat_oauth.fetch_access_token(code)\n logger.info(f'use code {code} get access_token {access_token_data}')\n user_info_data = wechat_oauth.get_user_info(\n openid=access_token_data['openid'],\n access_token=access_token_data['access_token'],\n lang='zh_CN')\n logger.debug('user_info_data:%s', user_info_data)\n\n next = request.GET.get('next')\n params = {'next': next}\n # 用户已经注册返回token,否则返回openid\n try:\n customer = Customer.objects.get(unionid=user_info_data['unionid'])\n except Customer.DoesNotExist:\n params['openid'] = OpenidToken.generate_token(user_info_data['openid'])\n else:\n logger.info(\n f'把pk为:{customer.pk}的Customer的wx_openid由{customer.wx_openid}更新为{user_info_data[\"openid\"]}'\n )\n customer.wx_openid = user_info_data['openid']\n customer.nick_name = user_info_data['nickname']\n customer.head_imgurl = user_info_data['headimgurl']\n customer.save()\n try:\n User.objects.get(customer=customer)\n except User.DoesNotExist:\n User.objects.create(customer=customer, username=uuid.uuid1())\n try:\n token = ZerocarToken.objects.get(user=customer.user, type=ZerocarToken.Types.WECHAT)\n except ZerocarToken.DoesNotExist:\n token = None\n if token is None or (now() - token.created).total_seconds() > settings.TOKEN_EXPIRED_IN:\n # token不存在或 过期\n logger.debug('TOKEN IS EXPIRED:%s', customer.mobile)\n params['openid'] = OpenidToken.generate_token(user_info_data['openid'])\n params['token_expired'] = 1\n else:\n params['token'] = token.key\n params['user_id'] = customer.user.id\n\n query = urlparse.urlencode(params)\n success_redirect_uri = '{}#/wechat_login'.format(request.GET.get('success_redirect_uri'))\n url = '{}?{}'.format(success_redirect_uri, query)\n logger.debug(f'after wechat auth,redirect to:{url}')\n return HttpResponseRedirect('{}?{}'.format(success_redirect_uri, query))\n\n\nclass GateWay(APIView):\n \"\"\"\n 接收微信消息和事件\n \"\"\"\n permission_classes = (AllowAny, )\n\n def get(self, request):\n signature = request.query_params.get('signature')\n timestamp = request.query_params.get('timestamp')\n nonce = request.query_params.get('nonce')\n echostr = request.query_params.get('echostr')\n token = settings.WECHAT_APP_TOKEN\n\n try:\n check_signature(token, signature, timestamp, nonce)\n except InvalidSignatureException: # 处理异常情况或忽略\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n return HttpResponse(echostr)\n\n @transaction.atomic\n def post(self, request):\n signature = request.query_params.get('signature', '')\n timestamp = request.query_params.get('timestamp', '')\n nonce = request.query_params.get('nonce', '')\n msg_signature = request.query_params.get('msg_signature', '')\n\n try:\n check_signature(settings.WECHAT_APP_TOKEN, signature, timestamp, nonce)\n except 
InvalidSignatureException:\n logger.error('check_signature error')\n return HttpResponse('')\n crypto = WeChatCrypto(settings.WECHAT_APP_TOKEN, settings.WECHAT_ENCODINGAESKEY,\n settings.WECHAT_APP_ID)\n try:\n msg = crypto.decrypt_message(request.body, msg_signature, timestamp, nonce)\n logger.info(msg)\n except (InvalidSignatureException, InvalidAppIdException):\n logger.error('decrypt_message error')\n return HttpResponse('')\n\n message = parse_message(msg)\n source = message.source\n if message.type == 'event':\n event = message.event\n if event == 'subscribe_scan':\n scene_id = message.scene_id\n logger.info(f'scene_id: {scene_id}, source: {source}')\n try:\n attention = QrcodeAttention.objects.get(pk=scene_id)\n except QrcodeAttention.DoesNotExist:\n logger.info(f'not found QrcodeAttention pk {scene_id}')\n return HttpResponse('')\n if not Member.objects.filter(\n customer__wx_openid=source).exists() and not WxRecoRela.objects.filter(\n to_wxopenid=source).exists():\n logger.info('if ok')\n WxRecoRela.objects.create(\n reco_user_id=attention.source_id,\n to_wxopenid=source,\n cr_time=current_timestamp())\n\n elif message.type == 'text':\n try:\n request_member = Member.objects.get(customer__wx_openid=source)\n except Member.DoesNotExist:\n return HttpResponse('')\n\n if not (request_member.mobile in settings.LOW_BATTERY_CAR_SEND_TEL\n and message.content == settings.LOW_BATTERY_CAR_REQUEST):\n return HttpResponse('')\n\n low_battery_info = list()\n cars = Car.objects.filter(\n is_using=0,\n sur_elec__lt=settings.LOW_BATTERY_THRESHOLD,\n is_enable=Car.Enable.ENABLE,\n state=Car.States.NORMAL).exclude(id__in=current_reserve_car_ids())\n sorted_station = cars.values('last_car_outlet').annotate(\n car_count=Count('last_car_outlet')).order_by('-car_count')\n\n for station in sorted_station:\n station_cars = cars.filter(\n last_car_outlet=station['last_car_outlet']).order_by('sur_elec')\n for car in station_cars:\n low_battery_info.append(\n f'{len(low_battery_info)+1}.{car.car_number}, 电量:{car.sur_elec}'\n f'%, 网点:{car.last_car_outlet.co_name}\\n')\n title = f'低电量在线车辆详情\\n{datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}\\n================\\n' \\\n f'电量<{settings.LOW_BATTERY_THRESHOLD}%(在线)未使用:{len(low_battery_info)}台\\n' \\\n f'----------------\\n'\n low_battery_info[0] = title + low_battery_info[0]\n\n for msg in devide_chunks(low_battery_info, 20):\n send_msg.delay(source, ''.join(msg))\n time.sleep(0.3)\n\n return HttpResponse('')\n\n return HttpResponse('')\n", "sub_path": "simplegit/zerocar-master/wechat/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.settings.WECHAT_OAUTH_SUCCESS_DEFAULT_REDIRECT_URI", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.WECHAT_OAUTH_SUCCESS_DEFAULT_REDIRECT_URI", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 45, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 50, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 50, "usage_type": "name"}, {"api_name": "django.conf.settings.ENVIRONMENT", "line_number": 53, "usage_type": 
"attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "django.conf.settings.ROOT_URL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings.ROOT_URL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 56, "usage_type": "name"}, {"api_name": "wechatpy.oauth.WeChatOAuth", "line_number": 58, "usage_type": "call"}, {"api_name": "wechatpy.oauth", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.conf.settings.WECHAT_APP_ID", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 59, "usage_type": "name"}, {"api_name": "django.conf.settings.WECHAT_APP_SECRET", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.settings.WECHAT_OAUTH_SCOPE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 64, "usage_type": "call"}, {"api_name": "wechat.helpers.get_wechat_oauth", "line_number": 71, "usage_type": "call"}, {"api_name": "customer.rest.apis", "line_number": 86, "usage_type": "name"}, {"api_name": "operation.models.Customer.objects.get", "line_number": 86, "usage_type": "call"}, {"api_name": "operation.models.Customer.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "operation.models.Customer", "line_number": 86, "usage_type": "name"}, {"api_name": "operation.models.Customer.DoesNotExist", "line_number": 87, "usage_type": "attribute"}, {"api_name": "operation.models.Customer", "line_number": 87, "usage_type": "name"}, {"api_name": "zerocar.utils.OpenidToken.generate_token", "line_number": 88, "usage_type": "call"}, {"api_name": "zerocar.utils.OpenidToken", "line_number": 88, "usage_type": "name"}, {"api_name": "customer.rest.apis.pk", "line_number": 91, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 91, "usage_type": "name"}, {"api_name": "customer.rest.apis.wx_openid", "line_number": 91, "usage_type": "attribute"}, {"api_name": "customer.rest.apis.wx_openid", "line_number": 93, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 93, "usage_type": "name"}, {"api_name": "customer.rest.apis.nick_name", "line_number": 94, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 94, "usage_type": "name"}, {"api_name": "customer.rest.apis.head_imgurl", "line_number": 95, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 95, "usage_type": "name"}, {"api_name": "customer.rest.apis.save", "line_number": 96, "usage_type": "call"}, {"api_name": "customer.rest.apis", "line_number": 96, "usage_type": "name"}, {"api_name": "operation.models.User.objects.get", "line_number": 98, "usage_type": "call"}, {"api_name": "operation.models.User.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "operation.models.User", "line_number": 98, "usage_type": "name"}, {"api_name": "customer.rest.apis", "line_number": 98, "usage_type": "name"}, {"api_name": "operation.models.User.DoesNotExist", "line_number": 99, "usage_type": 
"attribute"}, {"api_name": "operation.models.User", "line_number": 99, "usage_type": "name"}, {"api_name": "operation.models.User.objects.create", "line_number": 100, "usage_type": "call"}, {"api_name": "operation.models.User.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "operation.models.User", "line_number": 100, "usage_type": "name"}, {"api_name": "customer.rest.apis", "line_number": 100, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 100, "usage_type": "call"}, {"api_name": "operation.models.ZerocarToken.objects.get", "line_number": 102, "usage_type": "call"}, {"api_name": "operation.models.ZerocarToken.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "operation.models.ZerocarToken", "line_number": 102, "usage_type": "name"}, {"api_name": "customer.rest.apis.user", "line_number": 102, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 102, "usage_type": "name"}, {"api_name": "operation.models.ZerocarToken.Types", "line_number": 102, "usage_type": "attribute"}, {"api_name": "operation.models.ZerocarToken.DoesNotExist", "line_number": 103, "usage_type": "attribute"}, {"api_name": "operation.models.ZerocarToken", "line_number": 103, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 105, "usage_type": "call"}, {"api_name": "django.conf.settings.TOKEN_EXPIRED_IN", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 105, "usage_type": "name"}, {"api_name": "customer.rest.apis.mobile", "line_number": 107, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 107, "usage_type": "name"}, {"api_name": "zerocar.utils.OpenidToken.generate_token", "line_number": 108, "usage_type": "call"}, {"api_name": "zerocar.utils.OpenidToken", "line_number": 108, "usage_type": "name"}, {"api_name": "customer.rest.apis.user", "line_number": 112, "usage_type": "attribute"}, {"api_name": "customer.rest.apis", "line_number": 112, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 114, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 114, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 125, "usage_type": "name"}, {"api_name": "django.conf.settings.WECHAT_APP_TOKEN", "line_number": 132, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 132, "usage_type": "name"}, {"api_name": "wechatpy.utils.check_signature", "line_number": 135, "usage_type": "call"}, {"api_name": "wechatpy.exceptions.InvalidSignatureException", "line_number": 136, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 137, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 137, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "wechatpy.utils.check_signature", "line_number": 149, "usage_type": "call"}, {"api_name": "django.conf.settings.WECHAT_APP_TOKEN", "line_number": 149, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 149, "usage_type": "name"}, {"api_name": 
"wechatpy.exceptions.InvalidSignatureException", "line_number": 150, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 152, "usage_type": "call"}, {"api_name": "wechatpy.crypto.WeChatCrypto", "line_number": 153, "usage_type": "call"}, {"api_name": "django.conf.settings.WECHAT_APP_TOKEN", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 153, "usage_type": "name"}, {"api_name": "django.conf.settings.WECHAT_ENCODINGAESKEY", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.conf.settings.WECHAT_APP_ID", "line_number": 154, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 154, "usage_type": "name"}, {"api_name": "wechatpy.exceptions.InvalidSignatureException", "line_number": 158, "usage_type": "name"}, {"api_name": "wechatpy.exceptions.InvalidAppIdException", "line_number": 158, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 160, "usage_type": "call"}, {"api_name": "wechatpy.parse_message", "line_number": 162, "usage_type": "call"}, {"api_name": "operation.models.QrcodeAttention.objects.get", "line_number": 170, "usage_type": "call"}, {"api_name": "operation.models.QrcodeAttention.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "operation.models.QrcodeAttention", "line_number": 170, "usage_type": "name"}, {"api_name": "operation.models.QrcodeAttention.DoesNotExist", "line_number": 171, "usage_type": "attribute"}, {"api_name": "operation.models.QrcodeAttention", "line_number": 171, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 173, "usage_type": "call"}, {"api_name": "operation.models.Member.objects.filter", "line_number": 174, "usage_type": "call"}, {"api_name": "operation.models.Member.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "operation.models.Member", "line_number": 174, "usage_type": "name"}, {"api_name": "operation.models.WxRecoRela.objects.filter", "line_number": 175, "usage_type": "call"}, {"api_name": "operation.models.WxRecoRela.objects", "line_number": 175, "usage_type": "attribute"}, {"api_name": "operation.models.WxRecoRela", "line_number": 175, "usage_type": "name"}, {"api_name": "operation.models.WxRecoRela.objects.create", "line_number": 178, "usage_type": "call"}, {"api_name": "operation.models.WxRecoRela.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "operation.models.WxRecoRela", "line_number": 178, "usage_type": "name"}, {"api_name": "zerocar.utils.current_timestamp", "line_number": 181, "usage_type": "call"}, {"api_name": "operation.models.Member.objects.get", "line_number": 185, "usage_type": "call"}, {"api_name": "operation.models.Member.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "operation.models.Member", "line_number": 185, "usage_type": "name"}, {"api_name": "operation.models.Member.DoesNotExist", "line_number": 186, "usage_type": "attribute"}, {"api_name": "operation.models.Member", "line_number": 186, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 187, "usage_type": "call"}, {"api_name": "django.conf.settings.LOW_BATTERY_CAR_SEND_TEL", "line_number": 189, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 189, "usage_type": "name"}, {"api_name": "django.conf.settings.LOW_BATTERY_CAR_REQUEST", "line_number": 190, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 190, "usage_type": 
"name"}, {"api_name": "django.http.HttpResponse", "line_number": 191, "usage_type": "call"}, {"api_name": "car.models.Car.objects.filter", "line_number": 194, "usage_type": "call"}, {"api_name": "car.models.Car.objects", "line_number": 194, "usage_type": "attribute"}, {"api_name": "car.models.Car", "line_number": 194, "usage_type": "name"}, {"api_name": "django.conf.settings.LOW_BATTERY_THRESHOLD", "line_number": 196, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 196, "usage_type": "name"}, {"api_name": "car.models.Car.Enable", "line_number": 197, "usage_type": "attribute"}, {"api_name": "car.models.Car", "line_number": 197, "usage_type": "name"}, {"api_name": "car.models.Car.States", "line_number": 198, "usage_type": "attribute"}, {"api_name": "car.models.Car", "line_number": 198, "usage_type": "name"}, {"api_name": "customer.rest.apis.current_reserve_car_ids", "line_number": 198, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 200, "usage_type": "call"}, {"api_name": "car.models", "line_number": 205, "usage_type": "name"}, {"api_name": "car.models.car_number", "line_number": 207, "usage_type": "attribute"}, {"api_name": "car.models", "line_number": 207, "usage_type": "name"}, {"api_name": "car.models.sur_elec", "line_number": 207, "usage_type": "attribute"}, {"api_name": "car.models.last_car_outlet", "line_number": 208, "usage_type": "attribute"}, {"api_name": "car.models", "line_number": 208, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "name"}, {"api_name": "django.conf.settings.LOW_BATTERY_THRESHOLD", "line_number": 210, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 210, "usage_type": "name"}, {"api_name": "wechat.utils.devide_chunks", "line_number": 214, "usage_type": "call"}, {"api_name": "wechat.tasks.send_msg.delay", "line_number": 215, "usage_type": "call"}, {"api_name": "wechat.tasks.send_msg", "line_number": 215, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 216, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 218, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 220, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 141, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "411378914", "text": "'''Train a simple deep CNN on a HeLa dataset.\nGPU run command:\n\tTHEANO_FLAGS='mode=FAST_RUN,device=gpu,floatX=float32' python training_template.py\n\n'''\n\nfrom __future__ import print_function\nfrom keras.optimizers import SGD, RMSprop\n\nfrom cnn_functions import rate_scheduler, train_model_sample\nfrom model_zoo import feature_net_61x61 as the_model\n\nimport os\nimport datetime\nimport numpy as np\n\nbatch_size = 256\nn_classes = 3\nn_epoch = 25\n\nmodel = the_model(n_channels = 2, n_features = 3, reg = 1e-3, drop=0.5)\ndataset = \"RAW40X_all_61x61\"\ndirec_save = \"/home/nquach/DeepCell2/trained_networks/\"\ndirec_data = \"/home/nquach/DeepCell2/training_data_npz/\"\noptimizer = RMSprop(lr = 0.001, rho = 0.95, epsilon = 1e-8)\nlr_sched = rate_scheduler(lr = 0.001, decay = 0.95)\nexpt = \"feature_net_61x61_drop_reg3\"\n\niterate = 2\ntrain_model_sample(model = model, dataset = dataset, optimizer = optimizer, \n\texpt = expt, it = iterate, batch_size = batch_size, n_epoch = 
n_epoch,\n\tdirec_save = direc_save, \n\tdirec_data = direc_data, \n\tlr_sched = lr_sched,\n\trotate = True, flip = True, shear = 0)", "sub_path": "prototypes/training_scripts/RAW40X/RAW_61x61_drop_reg3_3.py", "file_name": "RAW_61x61_drop_reg3_3.py", "file_ext": "py", "file_size_in_byte": 1080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "model_zoo.feature_net_61x61", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 25, "usage_type": "call"}, {"api_name": "cnn_functions.rate_scheduler", "line_number": 26, "usage_type": "call"}, {"api_name": "cnn_functions.train_model_sample", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "502623903", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n#url_for takes these arguments as well\n# anchor specified the anchor name to be appened to the path\n# host overrides the default (current) host if provided\n# protocol overrides the default (current) protocol if provided\n# qualified creates the URL with the host/port information as\n# needed\n\n# TODO: add javascript escape code here so it's available in the template engine\n\nfrom datetime import datetime\nfrom routes import url_for\nfrom mako import filters\n\n# from markdown import markdown\n\ndef as_p(input_str):\n lines = input_str.splitlines()\n return unicode(\"\".join([u\"
<p>{0}</p>\".format(line) for line in lines]))\n\nclass tag(object):\n def set(self, **kargs):\n self.attribs.extend([u'''{0}=\"{1}\"'''.format(k.rstrip('_'), v) for k,v in kargs.items() ])\n return self\n\n\nclass img(tag):\n def __init__(self,src='', **kargs):\n self.img_src = src\n self.attribs = []\n self.set(**kargs)\n if \"alt\" not in kargs:\n self.set(alt=self.img_src)\n\n def __str__(self):\n '''Return the image in string form.'''\n return u'''<img src=\"{0}\" {1} />'''.format(self.img_src, \" \".join(self.attribs) )\n\n\nclass anchor(tag):\n def __init__(self,link_text='', name='', **kargs):\n self.link_text = link_text\n if name:\n self.url = name\n else:\n self.url=self.link_text\n self.attribs = []\n self.set(**kargs)\n\n def __str__(self):\n '''Return the anchor in string form.'''\n attr = \" \".join(self.attribs)\n return u'''<a name=\"{0}\" {1}>{2}</a>'''.format(self.url,\n \" \".join(self.attribs),\n self.link_text)\n\n\nclass link(tag):\n def __init__(self,link_text='', **kargs):\n self.link_text = link_text\n self.url = \"#\"\n self.attribs = []\n self.set(**kargs)\n\n def filter(self, filter_type=\"h\"):\n self.link_text = filters.html_escape(self.link_text)\n return self\n\n def to(self, *pargs, **kargs):\n self.url = url_for(*pargs, **kargs)\n return self\n\n def __str__(self):\n '''Return the link in string form.'''\n attr = \" \".join(self.attribs)\n return u'''<a href=\"{0}\" {1}>{2}</a>'''.format(self.url, attr, self.link_text)\n\ndef plural(list_object):\n '''Return \"s\" for > 1 items'''\n if len(list_object) > 1:\n return \"s\"\n else:\n return \"\"\n\n\ndef humanize(date_string):\n format = \"%Y-%m-%d %H:%M:%S\"\n try:\n date = datetime.strptime(date_string, format)\n except:\n return date_string\n now = datetime.now()\n delta = now - date\n plural = 's'\n if delta.days < 0:\n return \"in the future\"\n elif delta.days >= 1:\n if delta.days == 1:\n plural = ''\n return \"%s day%s ago\" % (str(delta.days),plural)\n # > 1 hour, display in hours\n elif delta.seconds > 3600:\n hours = int(round(delta.seconds / 3600.0))\n if hours == 1:\n plural = ''\n return \"%s hour%s ago\" % (str(hours),plural)\n elif delta.seconds > 60:\n minutes = int(round(delta.seconds / 60.0))\n if minutes == 1:\n plural = ''\n return \"%s minute%s ago\" % (str(minutes),plural)\n else:\n return \"just a moment ago\"", "sub_path": "pybald/core/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 3421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "mako.filters.html_escape", "line_number": 68, "usage_type": "call"}, {"api_name": "mako.filters", "line_number": 68, "usage_type": "name"}, {"api_name": "routes.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "294436689", "text": "import requests\nimport json\nfrom datetime import date, timedelta\n\nyesterday = (date.today() + timedelta(days=-1)).strftime(\"%Y%m%d\")\n\nk = 0\ndict = []\n\nfile_name = \"dict.json\"\nf = open(file_name, \"r\", encoding=\"utf-8\")\ndict_old = json.load(f)\nf.close()\n\nfor item in dict_old:\n response = requests.get(\"http://img1.money.126.net/data/hs/time/4days/0\" + str(item[0]).zfill(6) + \".json\")\n if response.status_code == 200 and 
response.json()[\"data\"][0][\"date\"] == yesterday:\n temp = [str(response.json()[\"symbol\"]), response.json()[\"name\"]]\n dict.append(temp)\n data = response.json()[\"data\"]\n for i in range(0, 4):\n file_name = \"data/\" + response.json()[\"symbol\"] + \"_\" + data[i][\"date\"] + \".json\"\n f = open(file_name, \"w\", encoding=\"utf-8\")\n json.dump(data[i][\"data\"], f, indent=4, ensure_ascii=False)\n f.close()\n\n # back up\n file_name = \"data_backup/\" + data[i][\"date\"] + \"_\" + response.json()[\"symbol\"] + \".json\"\n f = open(file_name, \"w\", encoding=\"utf-8\")\n json.dump(data[i][\"data\"], f, indent=4, ensure_ascii=False)\n f.close()\n k += 1\n print(\"Finish: \" + str(k) + \"/ \" + str(len(dict_old)))\n\nfile_name = \"dict.json\"\nf = open(file_name, \"w\", encoding=\"utf-8\")\njson.dump(dict, f, indent=4, ensure_ascii=False)\nf.close()\n\nfile_name = \"data/dict.json\"\nf = open(file_name, \"w\", encoding=\"utf-8\")\njson.dump(dict, f, indent=4, ensure_ascii=False)\nf.close()\n\nfile_name = \"data_backup/dict.json\"\nf = open(file_name, \"w\", encoding=\"utf-8\")\njson.dump(dict, f, indent=4, ensure_ascii=False)\nf.close()\n", "sub_path": "updata_data.py", "file_name": "updata_data.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.date.today", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 5, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 5, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "142966906", "text": "#coding:utf8\nfrom hifcampus.extensions import db\nfrom hifcampus.models import Hifuser,Hifcomment\nimport datetime\nimport json\nfrom mongoengine.queryset import queryset_manager\n\n\nclass Hifgrapevine(db.Document):\n \"\"\"Grapevine model\"\"\"\n id = db.IntField(primary_key=True,verbose_name=u\"ID\")\n title = db.StringField(verbose_name=u\"标题\",unique=True)\n author = db.ReferenceField(Hifuser,verbose_name=u\"来源\")\n content = db.StringField(verbose_name=u\"内容\")\n comments = db.ListField(db.ReferenceField(Hifcomment))\n create_time = db.DateTimeField(default=datetime.datetime.utcnow(),verbose_name=u\"创建时间\")\n # status 0 means pending review\n status = db.IntField(default=1,verbose_name=u\"状态\")\n isbanner = db.IntField(default=0)\n thumbnail = db.IntField(default=5)\n # add an information category\n #category = db.StringField(default=u\"新闻\",verbose_name=u\"新闻类别\")\n # custom query methods\n @queryset_manager\n def getlist(doc_cls,queryset,postid,perpage):\n '''\n Query the activity list\n '''\n if postid==0:\n res = queryset.filter(status=1).exclude('content').exclude('comments').exclude('status').exclude('isbanner')[0:perpage]\n else:\n res = queryset.filter(status=1,id__lt=postid).exclude('content').exclude('comments').exclude('status').exclude('isbanner')[0:perpage]\n reslist = json.loads(res.to_json())\n for i,item in enumerate(res):\n reslist[i]['author_name'] = item.author.nickname\n reslist[i]['author_thumbnail'] = item.author.thumbnail\n return reslist\n 
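# A note added for clarity (not in the original source): together with the\n # default ordering ['-id'] in the meta below, filtering id__lt=postid gives\n # keyset-style pagination -- the caller passes the smallest id seen on the\n # previous page and receives the next perpage older records, so pages stay\n # stable while new rows arrive; postid=0 is treated as the first page.\n 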
@queryset_manager\n def getcontent(doc_cls,queryset,postid):\n '''\n Query the details of a news item\n '''\n try:\n res = queryset.get(status=1,id=postid)#.only('comments')\n except:\n return None\n return {'comments':res['comments'],'content':res['content']}\n\n\n meta = {\n 'ordering': ['-id'],\n }\n", "sub_path": "hifcampus/models/grapevine.py", "file_name": "grapevine.py", "file_ext": "py", "file_size_in_byte": 1964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "hifcampus.extensions.db.Document", "line_number": 9, "usage_type": "attribute"}, {"api_name": "hifcampus.extensions.db", "line_number": 9, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.IntField", "line_number": 11, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 11, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.StringField", "line_number": 12, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 12, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.ReferenceField", "line_number": 13, "usage_type": "call"}, {"api_name": "hifcampus.models.Hifuser", "line_number": 13, "usage_type": "argument"}, {"api_name": "hifcampus.extensions.db", "line_number": 13, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.StringField", "line_number": 14, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 14, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.ListField", "line_number": 15, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 15, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.ReferenceField", "line_number": 15, "usage_type": "call"}, {"api_name": "hifcampus.models.Hifcomment", "line_number": 15, "usage_type": "argument"}, {"api_name": "hifcampus.extensions.db.DateTimeField", "line_number": 16, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "hifcampus.extensions.db.IntField", "line_number": 18, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 18, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.IntField", "line_number": 19, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 19, "usage_type": "name"}, {"api_name": "hifcampus.extensions.db.IntField", "line_number": 20, "usage_type": "call"}, {"api_name": "hifcampus.extensions.db", "line_number": 20, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "mongoengine.queryset.queryset_manager", "line_number": 24, "usage_type": "name"}, {"api_name": "mongoengine.queryset.queryset_manager", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "418057905", "text": "import ipt\nimport mxnet as mx\nfrom my_layer import *\n\n\n'''pm refers to Params'''\n\npm= {\n 'c1':{\n 'fsize' : (7,7),\n 'fnum' : 8,\n 'pad' : (0,0),\n 'stride': (1,1),\n },\n 'c2':{\n 'fsize' : (3,3),\n 'fnum' : 16,\n 'pad' : (0,0),\n 'stride': (1,1),\n },\n 'c3':{\n 'fsize' : (3,3),\n 'fnum' : 32,\n 'pad' : (0,0),\n 'stride': (1,1)\n },\n 'c4':{\n 'fsize' : (3,3),\n 'fnum' : 64,\n 'pad' : (0,0),\n 'stride': (1,1)\n },\n 'c5':{\n 'fsize' : (3,3),\n 'fnum' : 64,\n 'pad' : (0,0),\n 'stride': (1,1)\n },\n 'c6':{\n 'fsize' : (3,3),\n 'fnum' : 64,\n 
'pad' : (2,2),\n 'stride': (1,1)\n },\n 'c7':{\n 'fsize' : (3,3),\n 'fnum' : 64,\n 'pad' : (2,2),\n 'stride': (1,1)\n },\n 'c8':{\n 'fsize' : (7,7),\n 'fnum' : 64,\n 'pad' : (6,6),\n 'stride': (1,1)\n },\n 'c9':{\n 'fsize' : (3,3),\n 'fnum' : 16,\n 'pad' : (2,2),\n 'stride': (1,1)\n },\n 'c10':{\n 'fsize' : (7,7),\n 'fnum' : 8,\n 'pad' : (0,0),\n 'stride': (1,1)\n },\n 'c11':{\n 'fsize' : (7,7),\n 'fnum' : 1,\n 'pad' : (6,6),\n 'stride': (1,1)\n }\n }\n\n\n###############################################################\n###############################################################\n\n''' pm should be a dict of the params of each layers '''\ndata = mx.sym.Variable(name= 'data') #name must be data, don't know why\n\nconv1 = mx.sym.Convolution(name = 'conv1', data = data, kernel = pm['c1']['fsize'], \n num_filter = pm['c1']['fnum'], stride = pm['c1']['stride'], pad = pm['c1']['pad'] )\n\nbn1 = mx.sym.BatchNorm(data = conv1)\n\nrelu1 = mx.sym.Activation(data = bn1, act_type = 'relu')\nconv2 = mx.sym.Convolution(name = 'conv2', data = relu1, kernel = pm['c2']['fsize'], \n num_filter = pm['c2']['fnum'], stride = pm['c2']['stride'], pad = pm['c2']['pad'] )\nbn2 = mx.sym.BatchNorm(data = conv2)\n\nrelu2 = mx.sym.Activation(data = bn2, act_type = 'relu')\n\npool1 = mx.sym.Pooling(data = relu2, pool_type = \"max\", kernel=(2,2), stride = (2,2))\n\n\nconv3 = mx.sym.Convolution(name = 'conv3', data = pool1, kernel = pm['c3']['fsize'], \n num_filter = pm['c3']['fnum'], stride = pm['c3']['stride'], pad = pm['c3']['pad'] )\nbn3 = mx.sym.BatchNorm(data = conv3)\n\nrelu3 = mx.sym.Activation(data = bn3, act_type = 'relu')\npool2 = mx.sym.Pooling(data = relu3, pool_type = \"max\", kernel=(2,2), stride = (2,2))\n\n\nconv4 = mx.sym.Convolution(name = 'conv4', data = pool2, kernel = pm['c4']['fsize'], \n num_filter = pm['c4']['fnum'], stride = pm['c4']['stride'], pad = pm['c4']['pad'] )\nbn4 = mx.sym.BatchNorm(data = conv4)\n\nrelu4 = mx.sym.Activation(data = bn4, act_type = 'relu')\npool3 = mx.sym.Pooling(data = relu4, pool_type = \"max\", kernel=(2,2), stride = (2,2))\n\n\nconv5 = mx.sym.Convolution(name = 'conv5', data = pool3, kernel = pm['c5']['fsize'], \n num_filter = pm['c5']['fnum'], stride = pm['c5']['stride'], pad = pm['c5']['pad'] )\nbn5 = mx.sym.BatchNorm(data = conv5)\nrelu5 = mx.sym.Activation(data = bn5, act_type = 'relu')\nconv6 = mx.sym.Convolution(name = 'conv6', data = relu5, kernel = pm['c6']['fsize'], \n num_filter = pm['c6']['fnum'], stride = pm['c6']['stride'], pad = pm['c6']['pad'] )\nbn6 = mx.sym.BatchNorm(data = conv6)\n\nrelu6 = mx.sym.Activation(data = bn6, act_type = 'relu')\n\n\n# up1 = mx.sym.UpSampling(relu6, scale = 2, sample_type= 'bilinear', num_args = 1)\nup1 = mx.sym.Deconvolution(\n data = relu6, kernel = (4,4), stride = (2,2), pad = (1,1),\n num_filter = 64, no_bias = True\n )\n\nconv7 = mx.sym.Convolution(name = 'conv7', data = up1, kernel = pm['c7']['fsize'], \n num_filter = pm['c7']['fnum'], stride = pm['c7']['stride'], pad = pm['c7']['pad'] )\nbn7 = mx.sym.BatchNorm(data = conv7)\nrelu7 = mx.sym.Activation(data = bn7, act_type = 'relu')\n\n# up2 = mx.sym.UpSampling(relu7, scale = 2, sample_type = 'bilinear', num_args = 1)\nup2 = mx.sym.Deconvolution(\n data = relu7, kernel = (4,4), stride = (2,2), pad = (1,1),\n num_filter = 64, no_bias = True\n )\n\n\nconv8 = mx.sym.Convolution(name = 'conv8', data = up2, kernel = pm['c8']['fsize'], \n num_filter = pm['c8']['fnum'], stride = pm['c8']['stride'], pad = pm['c8']['pad'] )\nbn8 = mx.sym.BatchNorm(data = conv8)\n\nrelu8 = 
mx.sym.Activation(data = bn8, act_type = 'relu')\n\n# up3 = mx.sym.UpSampling(relu8, scale = 2, sample_type = 'bilinear', num_args = 1)\nup3 = mx.sym.Deconvolution(\n data = relu8, kernel = (4,4), stride = (2,2), pad = (1,1),\n num_filter = 32, no_bias = True\n )\n\nconv9 = mx.sym.Convolution(name = 'conv9', data = up3, kernel = pm['c9']['fsize'], \n num_filter = pm['c9']['fnum'], stride = pm['c9']['stride'], pad = pm['c9']['pad'] )\nbn9 = mx.sym.BatchNorm(data = conv9)\nrelu9 = mx.sym.Activation(data = bn9, act_type = 'relu')\n# conv10 = mx.sym.Convolution(name = 'conv10', data = relu9, kernel = pm['c10']['fsize'], \n# num_filter = pm['c10']['fnum'], stride = pm['c10']['stride'], pad = pm['c10']['pad'] )\n# relu10 = mx.sym.Activation(data = conv10, act_type = 'relu')\n\nconv10 = mx.sym.Convolution(name = 'conv10', data = relu9, kernel = (7,7), num_filter = 1, \n stride = (1,1), pad = (0,0) )\nbn10 = mx.sym.BatchNorm(data = conv10)\n\nreshape1 = mx.sym.Reshape(data = bn10, target_shape = (0, 1*256*256))\nfull1 = mx.sym.FullyConnected(data = reshape1, name = 'full1', num_hidden = 100)\nfull2 = mx.sym.FullyConnected(data = full1, name = 'full2', num_hidden = 1*256*256)\nreshape2 = mx.sym.Reshape(data = full2, target_shape = (0,1,256,256))\n\nout = mx.sym.Activation(data = reshape2, act_type = 'sigmoid') \n\n# net = mx.sym.Custom(data = out, name = 'softmax', op_type = 'iou')\n\n\n", "sub_path": "rnn/cache/net_unroll.py", "file_name": "net_unroll.py", "file_ext": "py", "file_size_in_byte": 6403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "mxnet.sym.Variable", "line_number": 82, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 82, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 84, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 84, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 87, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 87, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 89, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 89, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 90, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 90, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 92, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 92, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 94, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 94, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Pooling", "line_number": 96, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 96, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 99, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 99, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 101, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 101, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 103, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 103, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Pooling", "line_number": 104, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 104, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 107, "usage_type": "call"}, {"api_name": 
"mxnet.sym", "line_number": 107, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 109, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 109, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 111, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 111, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Pooling", "line_number": 112, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 112, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 115, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 115, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 117, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 117, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 118, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 118, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 119, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 119, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 121, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 121, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 123, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 123, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Deconvolution", "line_number": 127, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 127, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 132, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 132, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 134, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 134, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 135, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 135, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Deconvolution", "line_number": 138, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 138, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 144, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 144, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 146, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 146, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 148, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 148, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Deconvolution", "line_number": 151, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 151, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 156, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 156, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 158, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 158, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 159, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 159, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Convolution", "line_number": 164, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 164, "usage_type": "attribute"}, {"api_name": "mxnet.sym.BatchNorm", "line_number": 166, "usage_type": "call"}, {"api_name": 
"mxnet.sym", "line_number": 166, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 168, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 168, "usage_type": "attribute"}, {"api_name": "mxnet.sym.FullyConnected", "line_number": 169, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 169, "usage_type": "attribute"}, {"api_name": "mxnet.sym.FullyConnected", "line_number": 170, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 170, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Reshape", "line_number": 171, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 171, "usage_type": "attribute"}, {"api_name": "mxnet.sym.Activation", "line_number": 173, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 173, "usage_type": "attribute"}]} +{"seq_id": "248041", "text": "import keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.models import load_model\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_curve, auc, precision_recall_curve\nimport time\n\nbatch_size = 128\nepochs = 100\n\n# calculate a_crit using equation for Holman line # source: Holman & Wiegert, 1998, https://arxiv.org/pdf/astro-ph/9809315.pdf \ndef acrit(ebin,mu_bin):\n ac = 1.60+5.10*ebin+(-2.22)*ebin**2+4.12*mu_bin+(-4.27)*ebin*mu_bin+(-5.09)*mu_bin**2 + 4.61*(ebin**2)*(mu_bin**2)\n return ac\n\ndef massB(mu,mA):\n return mu*mA/(1-mu)\n\n#def period_ratio(mA,mB,ab,abin):\n# return np.sqrt((4 * (ab/abin)**3 * np.pi**2)/(G*(mA+mB)))\n\ndef period_ratio(ab,abin):\n return np.sqrt((ab/abin)**3)\n\nmA = 1\nG = 39.4769264214\nabin = 1\nmu = 0.1\n\ncolumns = ['ebin', 'ap', 'out']\nbig_job = np.vstack([np.array(map(float, line.split())) for line in open('train_mu_10.txt')])\nbig_batch = pd.DataFrame(big_job,columns=columns)\nbig_batch['mubin'] = mu\nbig_batch['(ap/abin)/ahw99 - 1'] = big_batch['ap']/acrit(big_batch['ebin'],big_batch['mubin']) - 1. 
\nbig_batch['zeta'] = period_ratio(big_batch['ap'],abin)\nbig_batch['epsilon'] = np.asarray(0.5*(big_batch['zeta'] - np.floor(big_batch['zeta'])))\nbig_batch['binary out'] = np.floor(big_batch['out'])\n\nX = np.asarray(big_batch[['mubin','(ap/abin)/ahw99 - 1','ebin','epsilon']])\ny = np.asarray(big_batch[['binary out']]) # choose not the direct output but the binary version of it \nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\nstart = time.clock()\n\nmodel = Sequential()\nmodel.add(Dense(24, activation='relu', input_shape=(4,)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1, activation='sigmoid')) # sigmoid/logistic function simpler than softmax \n\nmodel.summary()\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=RMSprop(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0],score[0].shape)\nprint('Test accuracy:', score[1], score[1].shape)\n\nend = time.clock()\nprint(end-start)\n\n# Saving the model \nmodel.save('dropout_6layer_24neuron.h5')\n", "sub_path": "mu_10/make_model_10.py", "file_name": "make_model_10.py", "file_ext": "py", "file_size_in_byte": 3174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 64, 
"usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 70, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "312939083", "text": "\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n### matplotlib inline\n\nfrom nltk.tokenize import TweetTokenizer\nimport datetime\nimport lightgbm as lgb\nfrom scipy import stats\nfrom scipy.sparse import hstack, csr_matrix\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom wordcloud import WordCloud\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nfrom nltk.util import ngrams\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.multiclass import OneVsRestClassifier\nimport time\npd.set_option('max_colwidth',400)\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalization\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten, Masking\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\nfrom keras.models import Model, load_model\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom keras import backend as K\nfrom keras.engine import InputSpec, Layer\nfrom keras.optimizers import Adam\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping, ReduceLROnPlateau\nfrom sklearn.preprocessing import OneHotEncoder\nimport os\nprint(os.listdir(\"../input/embeddings/glove.840B.300d/\"))\ntrain = pd.read_csv(\"../input/train.csv\")\ntest = pd.read_csv(\"../input/test.csv\")\nsub = pd.read_csv('../input/sample_submission.csv')\nimport os\nprint('Available embeddings:', os.listdir(\"../input/embeddings/\"))\ntrain[\"target\"].value_counts()\ntrain.head()\nprint('Average word length of questions in train is {0:.0f}.'.format(np.mean(train['question_text'].apply(lambda x: len(x.split())))))\nprint('Average word length of questions in test is {0:.0f}.'.format(np.mean(test['question_text'].apply(lambda x: len(x.split())))))\nprint('Max word length of questions in train is {0:.0f}.'.format(np.max(train['question_text'].apply(lambda x: len(x.split())))))\nprint('Max word length of questions in test is {0:.0f}.'.format(np.max(test['question_text'].apply(lambda x: len(x.split())))))\nprint('Average character length of questions in train is {0:.0f}.'.format(np.mean(train['question_text'].apply(lambda x: len(x)))))\nprint('Average character length of questions in test is {0:.0f}.'.format(np.mean(test['question_text'].apply(lambda x: len(x)))))\nmax_features = 90000\ntk = Tokenizer(lower = True, filters='', num_words=max_features)\nfull_text = list(train['question_text'].values) + list(test['question_text'].values)\ntk.fit_on_texts(full_text)\ntrain_tokenized = tk.texts_to_sequences(train['question_text'].fillna('missing'))\ntest_tokenized = tk.texts_to_sequences(test['question_text'].fillna('missing'))\ntrain['question_text'].apply(lambda x: len(x.split())).plot(kind='hist');\nplt.yscale('log');\nplt.title('Distribution of question text length in 
characters')\nmax_len = 70\nX_train = pad_sequences(train_tokenized, maxlen = max_len)\nX_test = pad_sequences(test_tokenized, maxlen = max_len)\nembedding_path = \"../input/embeddings/glove.840B.300d/glove.840B.300d.txt\"\n#embedding_path = \"../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt\"\nembed_size = 300\ndef get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\nembedding_index = dict(get_coefs(*o.split(\" \")) for o in open(embedding_path, encoding='utf-8', errors='ignore'))\nall_embs = np.stack(embedding_index.values())\nemb_mean,emb_std = all_embs.mean(), all_embs.std()\n\nword_index = tk.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words + 1, embed_size))\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\nohe = OneHotEncoder(sparse=False)\ny_ohe = ohe.fit_transform(train['target'].values.reshape(-1, 1))\ndef build_model(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32, epochs=20):\n file_path = \"best_model.hdf5\"\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\n x_lstm = Bidirectional(CuDNNLSTM(units, return_sequences = True))(x1)\n \n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\n \n x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\n max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\n \n \n x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n avg_pool1_lstm = GlobalAveragePooling1D()(x_conv3)\n max_pool1_lstm = GlobalMaxPooling1D()(x_conv3)\n \n x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n avg_pool2_lstm = GlobalAveragePooling1D()(x_conv4)\n max_pool2_lstm = GlobalMaxPooling1D()(x_conv4)\n \n \n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool2_gru, max_pool2_gru,\n avg_pool1_lstm, max_pool1_lstm, avg_pool2_lstm, max_pool2_lstm])\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n model.summary()\n history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n model = load_model(file_path)\n return model\n### %time\nmodel = build_model(lr = 1e-4, lr_d = 0, units = 64, spatial_dr = 0.5, kernel_size1=4, kernel_size2=3, dense_units=16, dr=0.1, conv_size=16, 
epochs=5)\n# pred = model.predict(X_test, batch_size = 1024, verbose = 1)\n# predictions = np.round(np.argmax(pred, axis=1)).astype(int)\n# sub['prediction'] = predictions\n# sub.to_csv(\"submission.csv\", index=False)\ndef build_model1(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32, epochs=20):\n file_path = \"best_model.hdf5\"\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\n \n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\n \n x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\n max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\n\n \n \n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool2_gru, max_pool2_gru])\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n model.summary()\n history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n model = load_model(file_path)\n return model\n#model1 = build_model1(lr = 1e-4, lr_d = 1e-7, units = 128, spatial_dr = 0.3, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.3, conv_size=32, epochs=5)\n#model1_1 = build_model1(lr = 1e-4, lr_d = 1e-7, units = 128, spatial_dr = 0.3, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=32, epochs=5)\ndef build_model2(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32, epochs=20):\n file_path = \"best_model.hdf5\"\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units * 2, return_sequences = True))(x1)\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x_gru)\n \n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\n \n x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\n max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\n \n x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool2_gru, max_pool2_gru])\n x = BatchNormalization()(x)\n x = 
Dropout(dr)(Dense(dense_units, activation='relu') (x))\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n model.summary()\n history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n model = load_model(file_path)\n return model\n#%%time\n#model2 = build_model2(lr = 1e-4, lr_d = 1e-7, units = 256, spatial_dr = 0.3, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=32, epochs=5)\n#model3 = build_model2(lr = 1e-3, lr_d = 1e-7, units = 256, spatial_dr = 0.3, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=16, epochs=5)\n### %time\nmodel4 = build_model2(lr = 1e-4, lr_d = 1e-7, units = 64, spatial_dr = 0.3, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=8, epochs=5)\n#model5 = build_model2(lr = 1e-4, lr_d = 1e-7, units = 256, spatial_dr = 0.1, kernel_size1=4, kernel_size2=3, dense_units=32, dr=0.1, conv_size=16, epochs=5)\nclass Attention(Layer):\n def __init__(self, step_dim,\n W_regularizer=None, b_regularizer=None,\n W_constraint=None, b_constraint=None,\n bias=True, **kwargs):\n \"\"\"\n Keras Layer that implements an Attention mechanism for temporal data.\n Supports Masking.\n Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]\n # Input shape\n 3D tensor with shape: `(samples, steps, features)`.\n # Output shape\n 2D tensor with shape: `(samples, features)`.\n :param kwargs:\n Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\n The dimensions are inferred based on the output shape of the RNN.\n Example:\n model.add(LSTM(64, return_sequences=True))\n model.add(Attention())\n \"\"\"\n self.supports_masking = True\n #self.init = initializations.get('glorot_uniform')\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n self.step_dim = step_dim\n self.features_dim = 0\n super(Attention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n\n self.W = self.add_weight((input_shape[-1],),\n initializer=self.init,\n name='{}_W'.format(self.name),\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n self.features_dim = input_shape[-1]\n\n if self.bias:\n self.b = self.add_weight((input_shape[1],),\n initializer='zero',\n name='{}_b'.format(self.name),\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n else:\n self.b = None\n\n self.built = True\n\n def compute_mask(self, input, input_mask=None):\n # do not pass the mask to the next layers\n return None\n\n def call(self, x, mask=None):\n # eij = K.dot(x, self.W) TF backend doesn't support it\n\n # features_dim = self.W.shape[0]\n # step_dim = x._keras_shape[1]\n\n features_dim = self.features_dim\n step_dim = self.step_dim\n\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n\n if self.bias:\n eij += self.b\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n # apply mask after the exp. 
will be re-normalized next\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in theano\n a *= K.cast(mask, K.floatx())\n\n # in some cases especially in the early stages of training the sum may be almost zero\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n a = K.expand_dims(a)\n weighted_input = x * a\n #print weigthted_input.shape\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n #return input_shape[0], input_shape[-1]\n return input_shape[0], self.features_dim\ndef build_model3(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, dense_units=128, dr=0.1, use_attention=True):\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units * 2, return_sequences = True))(x1)\n if use_attention:\n x_att = Attention(max_len)(x_gru)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x_att))\n else:\n x_att = Flatten() (x_gru)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x_att))\n\n x = BatchNormalization()(x)\n #x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n #model.summary()\n #history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n # verbose = 1, callbacks = [check_point, early_stop])\n #model = load_model(file_path)\n return model\n### %time\nfile_path = \"best_model.hdf5\"\ncheck_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\nearly_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\nmodel6 = build_model3(lr = 1e-3, lr_d = 1e-7, units = 64, spatial_dr = 0.3, dense_units=16, dr=0.1, use_attention=True)\nhistory = model6.fit(X_train, y_ohe, batch_size = 512, epochs = 10, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n# #%%time\n# file_path = \"best_model.hdf5\"\n# check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n# save_best_only = True, mode = \"min\")\n# early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n# model6_1 = build_model3(lr = 1e-3, lr_d = 1e-7, units = 64, spatial_dr = 0.3, dense_units=16, dr=0.1, use_attention=False)\n# history = model6_1.fit(X_train, y_ohe, batch_size = 512, epochs = 5, validation_split=0.1, \n# verbose = 1, callbacks = [check_point, early_stop])\ndef build_model4(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32, epochs=20):\n file_path = \"best_model.hdf5\"\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n\n x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\n \n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(x_gru)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = 
GlobalMaxPooling1D()(x_conv1)\n \n x = concatenate([avg_pool1_gru, max_pool1_gru])\n x = BatchNormalization()(x)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\n x = BatchNormalization()(x)\n #x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n model.summary()\n history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop])\n model = load_model(file_path)\n return model\n### %time\nmodel7 = build_model4(lr = 1e-4, lr_d = 1e-7, units = 64, spatial_dr = 0.3, kernel_size1=3, dense_units=32, dr=0.1, conv_size=8, epochs=5)\n#model8 = build_model4(lr = 1e-4, lr_d = 1e-7, units = 128, spatial_dr = 0.3, kernel_size1=4, dense_units=32, dr=0.1, conv_size=8, epochs=5)\ndef build_model5(lr=0.0, lr_d=0.0, units=0, spatial_dr=0.0, kernel_size1=3, kernel_size2=2, dense_units=128, dr=0.1, conv_size=32, epochs=20):\n file_path = \"best_model.hdf5\"\n check_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\n early_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 3)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,\n patience=2, min_lr=0.001)\n\n inp = Input(shape = (max_len,))\n x = Embedding(max_features + 1, embed_size, weights = [embedding_matrix], trainable = False)(inp)\n x1 = SpatialDropout1D(spatial_dr)(x)\n x_m = Masking()(x1)\n x_gru = LSTM(units)(x_m)\n\n x = BatchNormalization()(x_gru)\n x = Dropout(dr)(Dense(dense_units, activation='relu') (x))\n x = BatchNormalization()(x)\n #x = Dropout(dr)(Dense(int(dense_units / 2), activation='relu') (x))\n x = Dense(2, activation = \"sigmoid\")(x)\n model = Model(inputs = inp, outputs = x)\n model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n model.summary()\n history = model.fit(X_train, y_ohe, batch_size = 512, epochs = epochs, validation_split=0.1, \n verbose = 1, callbacks = [check_point, early_stop, reduce_lr])\n model = load_model(file_path)\n return model\n#model9 = build_model5(lr = 1e-4, lr_d = 1e-7, units = 128, spatial_dr = 0.3, kernel_size1=4, dense_units=32, dr=0.1, conv_size=8, epochs=10)\npred1 = model.predict(X_test, batch_size = 1024, verbose = 1)\npred = pred1\npred4 = model4.predict(X_test, batch_size = 1024, verbose = 1)\npred += pred4\npred2 = model7.predict(X_test, batch_size = 1024, verbose = 1)\npred += pred2\n# pred3 = model9.predict(X_test, batch_size = 1024, verbose = 1)\n# pred += pred3\npred4 = model6.predict(X_test, batch_size = 1024, verbose = 1)\npred += pred4\n# pred5 = model7.predict(X_test, batch_size = 1024, verbose = 1)\n# pred += pred5\npred = pred / 4\n#pred = model9.predict(X_test, batch_size = 1024, verbose = 1)\n\npredictions = np.round(np.argmax(pred, axis=1)).astype(int)\nsub['prediction'] = predictions\nsub.to_csv(\"submission.csv\", index=False)", "sub_path": "sources/eda-and-lstm-cnn.py", "file_name": "eda-and-lstm-cnn.py", "file_ext": "py", "file_size_in_byte": 21699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.set_option", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": 
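The Attention layer above reduces a (steps, features) sequence to a single (features,) vector by softmax-weighting the time steps. Below is a minimal NumPy sketch of the same computation for one sample; shapes and names are illustrative, not the library code itself:

import numpy as np

def attention_pool(x, W, b):
    # x: (steps, features), W: (features,), b: (steps,) -> (features,)
    eij = np.tanh(x @ W + b)              # one unnormalized score per step
    a = np.exp(eij)
    a /= a.sum() + 1e-8                   # softmax with an epsilon guard, as in call()
    return (x * a[:, None]).sum(axis=0)   # attention-weighted sum over steps

rng = np.random.default_rng(0)
pooled = attention_pool(rng.normal(size=(5, 3)), rng.normal(size=3), np.zeros(5))
print(pooled.shape)  # (3,)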
"pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNLSTM", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 96, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", 
"line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 138, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 140, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 141, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 142, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 144, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 147, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 148, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 152, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 156, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 157, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 159, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 161, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 162, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 163, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 167, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 173, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 175, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 177, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 178, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 179, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 181, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 181, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 182, 
"usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 182, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 184, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 185, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 186, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 188, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 189, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 190, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 192, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 193, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 194, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 194, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 195, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 196, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 196, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 197, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 198, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 199, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 203, "usage_type": "call"}, {"api_name": "keras.engine.Layer", "line_number": 211, "usage_type": "name"}, {"api_name": "keras.initializers.get", "line_number": 233, "usage_type": "call"}, {"api_name": "keras.initializers", "line_number": 233, "usage_type": "name"}, {"api_name": "keras.regularizers.get", "line_number": 235, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 235, "usage_type": "name"}, {"api_name": "keras.regularizers.get", "line_number": 236, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 236, "usage_type": "name"}, {"api_name": "keras.constraints.get", "line_number": 238, "usage_type": "call"}, {"api_name": "keras.constraints", "line_number": 238, "usage_type": "name"}, {"api_name": "keras.constraints.get", "line_number": 239, "usage_type": "call"}, {"api_name": "keras.constraints", "line_number": 239, "usage_type": "name"}, {"api_name": "keras.backend.reshape", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 280, "usage_type": "name"}, {"api_name": "keras.backend.dot", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.backend.tanh", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 285, "usage_type": "name"}, {"api_name": "keras.backend.exp", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 287, "usage_type": "name"}, {"api_name": "keras.backend.cast", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 292, "usage_type": "name"}, {"api_name": "keras.backend.floatx", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.backend.cast", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 295, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.backend.epsilon", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.backend.floatx", "line_number": 295, "usage_type": 
"call"}, {"api_name": "keras.backend.expand_dims", "line_number": 297, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 297, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 300, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 306, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 307, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 308, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 315, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 316, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 316, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 318, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 320, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 321, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 322, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 330, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 332, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 346, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 348, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 350, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 351, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 352, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 354, "usage_type": "call"}, {"api_name": "keras.layers.CuDNNGRU", "line_number": 354, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 356, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 357, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPooling1D", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 360, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 361, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 362, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 362, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 363, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 365, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 366, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 371, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 378, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 380, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 381, "usage_type": "call"}, {"api_name": "keras.layers.Input", 
"line_number": 384, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 385, "usage_type": "call"}, {"api_name": "keras.layers.SpatialDropout1D", "line_number": 386, "usage_type": "call"}, {"api_name": "keras.layers.Masking", "line_number": 387, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 388, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 390, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 391, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 391, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 392, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 394, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 395, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 396, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 418, "usage_type": "call"}]} +{"seq_id": "455880166", "text": "import asyncio\nimport subprocess\n\nfrom evdev import ecodes, KeyEvent\n\nfrom .keybind import handler, find_devices_by_vidpid, device_reader\nfrom .backend_pactl import PACtlBackend\n\npulse = PACtlBackend()\n\nsinks = pulse.get_sinks()\nsink_speakers = next(filter(lambda t: \"hdmi\" in t[1].lower(), sinks))\nsink_headphones = next(filter(lambda t: \"behringer\" in t[1].lower(), sinks))\n\n@handler(ecodes.KEY_DOT)\ndef dot_handler(kev : KeyEvent):\n if kev.keystate == KeyEvent.key_up:\n print(\"Dot!\")\n\n@handler(ecodes.KEY_A)\ndef ascend_handler(kev : KeyEvent):\n if kev.keystate == KeyEvent.key_up:\n subprocess.run([\"/bin/systemctl\", \"suspend\"], check=True)\n\n@handler(ecodes.KEY_F1)\ndef to_headphones(kev : KeyEvent):\n if kev.keystate == KeyEvent.key_up:\n pulse.move_sink_inputs_to(sink_headphones[1])\n\n@handler(ecodes.KEY_F2)\ndef to_speakers(kev : KeyEvent):\n if kev.keystate == KeyEvent.key_up:\n pulse.move_sink_inputs_to(sink_speakers[1])\n\ndef main():\n for inputdevice in find_devices_by_vidpid(0x239a, 0x80aa):\n asyncio.ensure_future(device_reader(inputdevice))\n\n loop = asyncio.get_event_loop()\n loop.run_forever()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "pypapoke/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "backend_pactl.PACtlBackend", "line_number": 9, "usage_type": "call"}, {"api_name": "evdev.KeyEvent", "line_number": 16, "usage_type": "name"}, {"api_name": "evdev.KeyEvent.key_up", "line_number": 17, "usage_type": "attribute"}, {"api_name": "evdev.KeyEvent", "line_number": 17, "usage_type": "name"}, {"api_name": "keybind.handler", "line_number": 15, "usage_type": "call"}, {"api_name": "evdev.ecodes.KEY_DOT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "evdev.ecodes", "line_number": 15, "usage_type": "name"}, {"api_name": "evdev.KeyEvent", "line_number": 21, "usage_type": "name"}, {"api_name": "evdev.KeyEvent.key_up", "line_number": 22, "usage_type": "attribute"}, {"api_name": "evdev.KeyEvent", "line_number": 22, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 23, "usage_type": "call"}, {"api_name": "keybind.handler", "line_number": 20, "usage_type": "call"}, {"api_name": 
"evdev.ecodes.KEY_A", "line_number": 20, "usage_type": "attribute"}, {"api_name": "evdev.ecodes", "line_number": 20, "usage_type": "name"}, {"api_name": "evdev.KeyEvent", "line_number": 26, "usage_type": "name"}, {"api_name": "evdev.KeyEvent.key_up", "line_number": 27, "usage_type": "attribute"}, {"api_name": "evdev.KeyEvent", "line_number": 27, "usage_type": "name"}, {"api_name": "keybind.handler", "line_number": 25, "usage_type": "call"}, {"api_name": "evdev.ecodes.KEY_F1", "line_number": 25, "usage_type": "attribute"}, {"api_name": "evdev.ecodes", "line_number": 25, "usage_type": "name"}, {"api_name": "evdev.KeyEvent", "line_number": 31, "usage_type": "name"}, {"api_name": "evdev.KeyEvent.key_up", "line_number": 32, "usage_type": "attribute"}, {"api_name": "evdev.KeyEvent", "line_number": 32, "usage_type": "name"}, {"api_name": "keybind.handler", "line_number": 30, "usage_type": "call"}, {"api_name": "evdev.ecodes.KEY_F2", "line_number": 30, "usage_type": "attribute"}, {"api_name": "evdev.ecodes", "line_number": 30, "usage_type": "name"}, {"api_name": "keybind.find_devices_by_vidpid", "line_number": 36, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 37, "usage_type": "call"}, {"api_name": "keybind.device_reader", "line_number": 37, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "80180878", "text": "import pytest\nimport markers\nimport settings\n\nfrom pages.project import ProjectPage\nfrom pages.meetings import MeetingsPage, MeetingDetailPage\n\n\n@pytest.fixture\ndef meetings_page(driver):\n meetings_page = MeetingsPage(driver)\n meetings_page.goto()\n return meetings_page\n\n\n@pytest.mark.skipif(settings.STAGE2, reason='No meetings on staging2')\n@pytest.mark.skipif(settings.TEST, reason='Only one meeting on test')\nclass TestMeetingsPage:\n\n def test_meetings_landing(self, meetings_page):\n assert meetings_page.register_text.absent()\n meetings_page.register_button.click()\n assert meetings_page.register_text.present()\n\n assert meetings_page.upload_text.absent()\n meetings_page.upload_button.click()\n assert meetings_page.upload_text.present()\n\n assert meetings_page.aps_logo.present()\n assert meetings_page.bitss_logo.present()\n assert meetings_page.nrao_logo.present()\n assert meetings_page.spsp_logo.present()\n\n def test_filtering(self, meetings_page):\n default_top_result = meetings_page.top_meeting_link.text\n meetings_page.filter_input.clear()\n meetings_page.filter_input.send_keys('ea')\n filtered_top_result = meetings_page.top_meeting_link.text\n assert default_top_result != filtered_top_result\n\n def test_carets(self, meetings_page):\n default_top_result = meetings_page.top_meeting_link.text\n meetings_page.sort_caret_name_desc.click()\n sorted_top_result = meetings_page.top_meeting_link.text\n assert default_top_result != sorted_top_result\n\n @markers.core_functionality\n def test_meetings_list(self, meetings_page, driver):\n meeting_name = meetings_page.top_meeting_link.text\n meetings_page.top_meeting_link.click()\n meeting_detail = MeetingDetailPage(driver, verify=True)\n assert meeting_name == meeting_detail.meeting_title.text.strip()\n\n\n@pytest.mark.skipif(settings.STAGE2, reason='No meetings on staging2')\n@pytest.mark.skipif(settings.TEST, reason='Only one meeting on test')\nclass TestMeetingDetailPage:\n\n @pytest.fixture\n def meeting_detail_page(self, meetings_page, driver):\n meetings_page.top_meeting_link.click()\n return MeetingDetailPage(driver, 
verify=True)\n\n @markers.core_functionality\n def test_meeting_detail(self, meeting_detail_page, driver):\n assert meeting_detail_page.entry_download_button.present()\n entry_title = meeting_detail_page.second_entry_link.text\n meeting_detail_page.second_entry_link.click()\n project_page = ProjectPage(driver, verify=True)\n assert entry_title == project_page.title.text\n\n def test_filtering_detail(self, meeting_detail_page):\n default_second_result = meeting_detail_page.second_entry_link.text\n meeting_detail_page.filter_input.clear()\n meeting_detail_page.filter_input.send_keys('w')\n filtered_second_result = meeting_detail_page.second_entry_link.text\n assert default_second_result != filtered_second_result\n\n def test_carets_detail(self, meeting_detail_page):\n default_second_result = meeting_detail_page.second_entry_link.text\n meeting_detail_page.sort_caret_title_asc.click()\n sorted_second_result = meeting_detail_page.second_entry_link.text\n assert default_second_result != sorted_second_result\n\n\n# Future tests could include:\n# - click download button, confirm download count increases (this will have to be omitted in production test runs)\n", "sub_path": "tests/test_meetings.py", "file_name": "test_meetings.py", "file_ext": "py", "file_size_in_byte": 3525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pages.meetings.MeetingsPage", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pages.meetings.MeetingDetailPage", "line_number": 51, "usage_type": "call"}, {"api_name": "markers.core_functionality", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 16, "usage_type": "attribute"}, {"api_name": "settings.STAGE2", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}, {"api_name": "settings.TEST", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pages.meetings.MeetingDetailPage", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pages.project.ProjectPage", "line_number": 69, "usage_type": "call"}, {"api_name": "markers.core_functionality", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}, {"api_name": "settings.STAGE2", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 56, "usage_type": "attribute"}, {"api_name": "settings.TEST", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "479038318", "text": "\"\"\"\nA collection of handy utilities\n\"\"\"\n\nfrom typing import List, Tuple, Dict, Any\n\nimport os\nimport glob\nimport json\nimport logging\nimport tarfile\nimport traceback\nimport torch\nimport pprint\nimport copy\nimport numpy\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common import Params\nfrom allennlp.common.params import with_fallback\nfrom allennlp.commands.predict import _PredictManager\nfrom allennlp.common.checks import check_for_gpu\nfrom 
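The tests above combine page objects with pytest fixtures that build and navigate a page before each test runs. A self-contained sketch of that shape with a stub class standing in for the selenium-backed pages:

import pytest

class FakePage:
    # Stand-in for a selenium-backed page object.
    def __init__(self, driver):
        self.driver = driver
        self.visited = False
    def goto(self):
        self.visited = True
        return self

@pytest.fixture
def page():
    return FakePage(driver='stub-driver').goto()

def test_page_was_opened(page):
    assert page.visited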
allennlp.models.archival.load_archive\nfrom allennlp.predictors.predictor import Predictor\n\nlogger = logging.getLogger(__name__)\n\n# count number of sentences in file, if it is a conllu-like\n# file it counts the empty lines, otherwise it counts all \n# lines\ndef countLines(path):\n total = 0\n empty = 0\n for line in open(path):\n total += 1\n if line.strip() == '':\n empty += 1\n if empty < 10:\n return total\n else:\n return empty\n\ndef merge_configs(parameters_config: str, dataset_config: str, overrides: Dict) -> Params:\n \"\"\"\n Merges a dataset config file with a parameters config file\n \"\"\"\n mergedSettings = Params.from_file(parameters_config).as_dict()\n mergedSettings = with_fallback(overrides, mergedSettings)#.update(overrides)\n #mergedSettings = Params(mergedSettings)\n dataset_config = Params.from_file(dataset_config)\n defaultDecoder = mergedSettings['model'].pop('default_decoder')\n orderedStuff = {}\n mergedSettings['dataset_reader']['datasets'] = {}\n mergedSettings['model']['decoders'] = {}\n\n for dataset in dataset_config:\n dataReader = {} \n dataReader['train'] = dataset_config[dataset]['train_data_path']\n dataReader['dev'] = dataset_config[dataset]['validation_data_path']\n if 'test_data_path' in dataset_config[dataset]:\n dataReader['test'] = dataset_config[dataset]['test_data_path']\n\n if 'word_idx' in dataset_config[dataset]:\n dataReader['word_idx'] = dataset_config[dataset]['word_idx']\n else:\n dataReader['sent_idxs'] = dataset_config[dataset]['sent_idxs']\n \n dataReader['tasks'] = {}\n if 'copy_other_columns' in dataset_config[dataset]:\n dataReader['copy_other_columns'] = dataset_config[dataset]['copy_other_columns']\n else:\n dataReader['copy_other_columns'] = mergedSettings['model']['default_dataset']['copy_other_columns']\n\n for task in dataset_config[dataset]['tasks']:\n taskOverride = dataset_config[dataset]['tasks'][task]\n decoder = copy.deepcopy(defaultDecoder)\n decoder.update(taskOverride)\n\n decoder['dataset'] = dataset\n decoder['task'] = task\n\n dataReader['tasks'][task] = copy.deepcopy(decoder)\n orderIdx = decoder['order']\n if 'task_type' not in decoder:\n logger.warning('Error, task ' + task + ' has no defined task_type')\n exit(1)\n curTrans = decoder['task_type']\n curLayer = decoder['layer']\n \n\n if decoder['task_type'] == 'dependency':\n decoder['type'] = 'machamp_dependency_decoder'\n if 'metric' not in dataReader['tasks'][task]:\n decoder['metric'] = 'LAS'\n if 'tag_representation_dim' not in dataReader['tasks'][task]:\n decoder['tag_representation_dim'] = 256\n if 'arc_representation_dim' not in dataReader['tasks'][task]:\n decoder['arc_representation_dim'] = 768\n\n elif decoder['task_type'] == 'classification':\n decoder['type'] = 'machamp_sentence_classifier'\n #ROB TODO why do we need empty kwargs?\n decoder['kwargs'] = {}\n\n elif decoder['task_type'] == 'multiseq':\n decoder['type'] = 'multiseq_decoder'\n\n elif decoder['task_type'] in ['seq', 'string2string']:\n if 'decoder_type' in decoder and decoder['decoder_type'] == 'crf':\n decoder['type'] = 'masked_crf_decoder'\n del decoder['decoder_type']\n else:\n decoder['type'] = 'machamp_tag_decoder'\n \n else: \n logger.warning('task_type ' + str(dataReader['tasks'][task]['task_type']) + \" not known\")\n exit(1)\n\n if 'metric' not in decoder:\n decoder['metric'] = 'acc'\n if decoder['metric'] == 'span_f1':\n decoder['metric'] = 'machamp_span_f1'\n orderedStuff[task] = [orderIdx, curTrans, curLayer]\n\n # save stuff in 
mergedSettings\n mergedSettings['model']['decoders'][task] = decoder\n dataReader['tasks'][task] = copy.deepcopy(decoder)\n mergedSettings['dataset_reader']['datasets'][dataset] = dataReader\n # Rob: we definitely do not want to cheat and add dev and test labels here\n mergedSettings[\"datasets_for_vocab_creation\"] = [\"train\"]\n \n del mergedSettings['model']['default_dataset']\n\n # to support reading from multiple files we add them to the datasetreader constructor instead\n # the following ones are there just here to make allennlp happy\n mergedSettings['train_data_path'] = 'train'\n mergedSettings['validation_data_path'] = 'dev'\n if 'test_data_path' in dataset_config[dataset]:\n mergedSettings['test_data_path'] = 'test'\n \n # generate ordered lists, which make it easier to use in the machamp model\n orderedTasks = []\n orderedTaskTypes = []\n orderedLayers = []\n for label, idx in sorted(orderedStuff.items(), key=lambda item: item[1]):\n orderedTasks.append(label)\n orderedTaskTypes.append(orderedStuff[label][1])\n orderedLayers.append(orderedStuff[label][2])\n mergedSettings['model']['tasks'] = orderedTasks\n mergedSettings['model']['task_types'] = orderedTaskTypes\n mergedSettings['model']['layers_for_tasks'] = orderedLayers\n \n mergedSettings['model']['decoders'][orderedTasks[0]]['prev_task'] = None\n for taskIdx, task in enumerate(orderedTasks[1:]):\n mergedSettings['model']['decoders'][task]['prev_task'] = orderedTasks[taskIdx] \n #TODO shouldn't this be -1?\n for task in orderedTasks:\n mergedSettings['model']['decoders'][task]['task_types'] = orderedTaskTypes \n mergedSettings['model']['decoders'][task]['tasks'] = orderedTasks \n #taskIdx is not +1, because first item is skipped\n\n # remove items from tagdecoder, as they are not necessary there\n for item in ['task_type', 'dataset', 'column_idx', 'layer', 'order']:\n for task in mergedSettings['model']['decoders']:\n if item in mergedSettings['model']['decoders'][task]:\n del mergedSettings['model']['decoders'][task][item]\n\n \n if 'trainer' in overrides and 'cuda_device' in overrides['trainer']:\n mergedSettings['trainer']['cuda_device'] = overrides['trainer']['cuda_device']\n #import pprint\n #pprint.pprint(mergedSettings.as_dict())\n #exit(1)\n numSents = 0\n for dataset in mergedSettings['dataset_reader']['datasets']:\n trainPath = mergedSettings['dataset_reader']['datasets'][dataset]['train']\n numSents += countLines(trainPath)\n warmup = int(numSents/mergedSettings['iterator']['batch_size'])\n mergedSettings['trainer']['learning_rate_scheduler']['warmup_steps'] = warmup\n mergedSettings['trainer']['learning_rate_scheduler']['start_step'] = warmup\n mergedSettings['model']['bert_path'] = mergedSettings['dataset_reader']['token_indexers']['bert']['pretrained_model']\n\n #TODO, this will result in the same as appending _tags , however, the \n # warning will still be there... 
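The scheduler warm-up chosen in merge_configs above is simply one epoch's worth of batches: the training sentence count divided by the batch size. As a toy check:

def warmup_steps(num_sentences, batch_size):
    # matches warmup = int(numSents / batch_size) above
    return int(num_sentences / batch_size)

assert warmup_steps(12800, 32) == 400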
this can be circumvented by copying \n # allennlp.data.fields.sequence_label_field and add a smarter check...\n #mergedSettings['vocabulary'] = {'non_padded_namespaces': ['ne1']}\n return Params(mergedSettings)\n\n\ndef predict_model_with_archive(predictor: str, params: Params, archive: str,\n input_file: str, output_file: str, batch_size: int = 1):\n cuda_device = params[\"trainer\"][\"cuda_device\"]\n\n check_for_gpu(cuda_device)\n archive = load_archive(archive,\n cuda_device=cuda_device)\n for item in archive.config.duplicate():\n archive.config.__delitem__(item)\n for item in params:\n archive.config[item] = params.as_dict()[item]\n\n predictor = Predictor.from_archive(archive, predictor)\n\n manager = _PredictManager(predictor,\n input_file,\n output_file,\n batch_size,\n print_to_console=False,\n has_dataset_reader=True)\n manager.run()\n\n\ndef predict_model(predictor: str, params: Params, archive_dir: str,\n input_file: str, output_file: str, batch_size: int = 1):\n \"\"\"\n Predict output annotations from the given model and input file and produce an output file.\n :param predictor: the type of predictor to use, e.g., \"machamp_predictor\"\n :param params: the Params of the model\n :param archive_dir: the saved model archive\n :param input_file: the input file to predict\n :param output_file: the output file to save\n :param batch_size: the batch size, set this higher to speed up GPU inference\n \"\"\"\n archive = os.path.join(archive_dir, \"model.tar.gz\")\n predict_model_with_archive(predictor, params, archive, input_file, output_file, batch_size)\n\n\ndef cleanup_training(serialization_dir: str, keep_archive: bool = False, keep_weights: bool = False):\n \"\"\"\n Removes files generated from training.\n :param serialization_dir: the directory to clean\n :param keep_archive: whether to keep a copy of the model archive\n :param keep_weights: whether to keep copies of the intermediate model checkpoints\n \"\"\"\n if not keep_weights:\n for file in glob.glob(os.path.join(serialization_dir, \"*.th\")):\n os.remove(file)\n if not keep_archive:\n os.remove(os.path.join(serialization_dir, \"model.tar.gz\"))\n\n\ndef archive_bert_model(serialization_dir: str, config_file: str, output_file: str = None):\n \"\"\"\n Extracts BERT parameters from the given model and saves them to an archive.\n :param serialization_dir: the directory containing the saved model archive\n :param config_file: the configuration file of the model archive\n :param output_file: the output BERT archive name to save\n \"\"\"\n archive = load_archive(os.path.join(serialization_dir, \"model.tar.gz\"))\n\n\n model = archive.model\n model.eval()\n\n try:\n bert_model = model.text_field_embedder.token_embedder_bert.model\n except AttributeError:\n logger.warning(f\"Could not find the BERT model inside the archive {serialization_dir}\")\n traceback.print_exc()\n return\n\n weights_file = os.path.join(serialization_dir, \"pytorch_model.bin\")\n torch.save(bert_model.state_dict(), weights_file)\n\n if not output_file:\n output_file = os.path.join(serialization_dir, \"bert-finetune.tar.gz\")\n\n with tarfile.open(output_file, 'w:gz') as archive:\n archive.add(config_file, arcname=\"bert_config.json\")\n archive.add(weights_file, arcname=\"pytorch_model.bin\")\n\n os.remove(weights_file)\n\n\ndef to_multilabel_sequence(predictions, vocab, task):\n #TODO @AR: Hard-coded parameters for now\n THRESH = 0.5\n k = 2\n outside_index = vocab.get_token_index(\"O\", namespace=task)\n\n # @AR: Get the thresholded matrix and prepare the 
prediction sequence\n pred_over_thresh = (predictions >= THRESH) * predictions\n sequence_token_labels = []\n\n # @AR: For each label set, check if to apply argmax or sigmoid thresh\n for pred in pred_over_thresh:\n num_pred_over_thresh = numpy.count_nonzero(pred)\n\n if num_pred_over_thresh < k:\n pred_idx_list = [numpy.argmax(predictions, axis=-1)]\n # print(\"argmax ->\", pred_idx_list)\n else:\n pred_idx_list = [numpy.argmax(predictions, axis=-1)]\n # pred_idx_list = list(numpy.argpartition(pred, -k)[-k:])\n # # print(\"sigmoid ->\", pred_idx_list)\n\n # # If the first (i.e., second best) is \"O\", ignore/remove it\n # if pred_idx_list[0] == outside_index:\n # pred_idx_list = pred_idx_list[1:]\n # # If the second (i.e., the best) is \"O\", ignore/remove the first\n # elif pred_idx_list[1] == outside_index:\n # pred_idx_list = pred_idx_list[1:]\n # else:\n # pass\n\n sequence_token_labels.append(pred_idx_list)\n\n return sequence_token_labels\n", "sub_path": "machamp/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 12823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 43, "usage_type": "name"}, {"api_name": "allennlp.common.Params.from_file", "line_number": 47, "usage_type": "call"}, {"api_name": "allennlp.common.Params", "line_number": 47, "usage_type": "name"}, {"api_name": "allennlp.common.params.with_fallback", "line_number": 48, "usage_type": "call"}, {"api_name": "allennlp.common.Params.from_file", "line_number": 50, "usage_type": "call"}, {"api_name": "allennlp.common.Params", "line_number": 50, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 76, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 82, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 128, "usage_type": "call"}, {"api_name": "allennlp.common.Params", "line_number": 188, "usage_type": "call"}, {"api_name": "allennlp.common.Params", "line_number": 43, "usage_type": "name"}, {"api_name": "allennlp.common.Params", "line_number": 191, "usage_type": "name"}, {"api_name": "allennlp.common.checks.check_for_gpu", "line_number": 195, "usage_type": "call"}, {"api_name": "allennlp.models.archival.load_archive", "line_number": 196, "usage_type": "call"}, {"api_name": "allennlp.predictors.predictor.Predictor.from_archive", "line_number": 203, "usage_type": "call"}, {"api_name": "allennlp.predictors.predictor.Predictor", "line_number": 203, "usage_type": "name"}, {"api_name": "allennlp.commands.predict._PredictManager", "line_number": 205, "usage_type": "call"}, {"api_name": "allennlp.common.Params", "line_number": 214, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 238, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "allennlp.models.archival.load_archive", "line_number": 250, "usage_type": "call"}, 
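to_multilabel_sequence above describes a threshold-or-argmax rule (keep every label scoring at least THRESH when k or more qualify, otherwise fall back to argmax), although the committed code currently argmaxes in both branches. A standalone sketch of the rule the comments describe:

import numpy as np

THRESH, k = 0.5, 2  # same constants as in the source

def decode_token(pred):
    over = np.nonzero(pred >= THRESH)[0]
    if len(over) >= k:
        return [int(i) for i in over]   # multi-label case
    return [int(np.argmax(pred))]       # single-label fallback

print(decode_token(np.array([0.1, 0.7, 0.6])))  # [1, 2]
print(decode_token(np.array([0.1, 0.7, 0.2])))  # [1]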
{"api_name": "os.path.join", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 269, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 294, "usage_type": "call"}]} +{"seq_id": "113538059", "text": "###########################\n# Latent ODEs for Irregularly-Sampled Time Series\n# Author: Yulia Rubanova\n# Modified by Ji Won Park (@jiwoncpark) for joint reconstruction for\n# multiple filters and parameter regression\n###########################\n\nfrom functools import partial\nimport torch\nimport magnify.latent_ode.lib.utils as utils\nfrom magnify.latent_ode.periodic_utils import Periodic_1d, Periodic1dDataset\nimport magnify.latent_ode.drw_utils as drw_utils\nfrom torch.distributions import uniform\nfrom torch.utils.data import DataLoader\n\n\ndef parse_datasets(args, device):\n\n def basic_collate_fn(batch, time_steps, args = args, device=device, data_type=\"train\"):\n tseries, labels = map(list, zip(*batch))\n tseries = torch.stack(tseries, dim=0)\n labels = torch.stack(labels, dim=0)\n tseries = tseries.to(device) # [n_samples, n_times, input_dim]\n labels = labels.to(device) # [n_samples, n_labels]\n # batch = torch.stack(batch) # [B, n_times, 2, 1]\n data_dict = {\n \"data\": tseries,\n \"time_steps\": time_steps}\n # physionet did this before calling split_and_subsample_batch\n data_dict[\"labels\"] = labels\n data_dict = utils.split_and_subsample_batch(data_dict, args,\n data_type=data_type)\n return data_dict\n\n dataset_name = args.dataset\n\n n_total_tp = args.timepoints + args.extrap\n max_t_extrap = args.max_t / args.timepoints * n_total_tp\n\n ##################################################################\n\n if dataset_name == 'drw':\n train_seed = 123\n val_seed = 456\n train_dataset, test_dataset = drw_utils.get_drw_datasets(train_seed,\n val_seed)\n # record_id, tt, y_vals, labels, mask = train_dataset[0]\n input_dim = train_dataset[0]['y'].shape[-1] # [n_filters]\n n_labels = len(train_dataset.get_sliced_params())\n batch_size = args.batch_size\n print(\"batch size\", batch_size)\n # record_id, tt, vals, mask, labels = train_data[0]\n\n # n_samples = len(total_dataset)\n data_min, data_max = drw_utils.get_data_min_max(train_dataset,\n device)\n print(\"Data min: \", data_min)\n print(\"Data max: \", data_max)\n\n train_dataloader = DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=False,\n # num_workers=4,\n collate_fn=partial(drw_utils.variable_time_collate_fn,\n args=args,\n device=device,\n data_type=\"train\",\n data_min=data_min,\n data_max=data_max))\n test_dataloader = DataLoader(test_dataset,\n batch_size=len(test_dataset),\n shuffle=False,\n collate_fn=partial(drw_utils.variable_time_collate_fn,\n args=args,\n device=device,\n data_type=\"test\",\n data_min=data_min,\n 
data_max=data_max))\n\n attr_names = train_dataset.get_sliced_params()\n data_objects = {\"dataset_obj\": train_dataset,\n \"train_dataloader\": utils.inf_generator(train_dataloader),\n \"test_dataloader\": utils.inf_generator(test_dataloader),\n \"input_dim\": input_dim,\n \"n_train_batches\": len(train_dataloader),\n \"n_test_batches\": len(test_dataloader),\n \"attr\": attr_names, # optional\n \"classif_per_tp\": False, # optional\n \"n_labels\": n_labels} # optional\n return data_objects\n\n ########### 1d datasets ###########\n\n # Sampling args.timepoints time points in the interval [0, args.max_t]\n # Sample points for both training sequence and explapolation (test)\n distribution = uniform.Uniform(torch.Tensor([0.0]),torch.Tensor([max_t_extrap]))\n time_steps_extrap = distribution.sample(torch.Size([n_total_tp-1]))[:,0]\n time_steps_extrap = torch.cat((torch.Tensor([0.0]), time_steps_extrap))\n time_steps_extrap = torch.sort(time_steps_extrap)[0]\n\n dataset_obj = None\n ##################################################################\n # Sample a periodic function\n if dataset_name == \"periodic\":\n dataset_obj = Periodic_1d(\n init_freq = None, init_amplitude = 1.,\n final_amplitude = 1., final_freq = None,\n z0 = 1.)\n\n ##################################################################\n\n if dataset_obj is None:\n raise Exception(\"Unknown dataset: {}\".format(dataset_name))\n\n print(\"n_samples\", args.n)\n dataset = dataset_obj.sample_traj(time_steps_extrap, n_samples=args.n,\n noise_weight=args.noise_weight)\n\n # Process small datasets\n time_steps_extrap = time_steps_extrap.to(device)\n\n train_y, test_y = utils.split_train_test(dataset, train_frac=0.8)\n train_data = Periodic1dDataset(train_y)\n test_data = Periodic1dDataset(test_y)\n\n # first example (0), first in tuple for tseries (0), 2nd dim of each tseries\n input_dim = train_y[0].size(-1) # which-dimensional time series?\n\n batch_size = min(args.batch_size, args.n)\n print(\"batch size\", batch_size)\n train_dataloader = DataLoader(train_data,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=lambda b: basic_collate_fn(b, time_steps_extrap, data_type=\"train\"))\n test_dataloader = DataLoader(test_data,\n batch_size=args.n,\n shuffle=False,\n collate_fn=lambda b: basic_collate_fn(b, time_steps_extrap, data_type = \"test\"))\n print(\"number of train batches\", len(train_dataloader))\n print(\"number of test batches\", len(test_dataloader))\n data_objects = {\"train_dataloader\": utils.inf_generator(train_dataloader),\n \"test_dataloader\": utils.inf_generator(test_dataloader),\n \"input_dim\": input_dim,\n \"n_train_batches\": len(train_dataloader),\n \"n_test_batches\": len(test_dataloader),\n \"n_labels\": 1,\n \"classif_per_tp\": False, }\n return data_objects\n\n\n", "sub_path": "magnify/latent_ode/lib/parse_datasets.py", "file_name": "parse_datasets.py", "file_ext": "py", "file_size_in_byte": 7052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.stack", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 22, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils.split_and_subsample_batch", "line_number": 31, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 31, "usage_type": "name"}, {"api_name": "magnify.latent_ode.drw_utils.get_drw_datasets", "line_number": 45, "usage_type": "call"}, {"api_name": "magnify.latent_ode.drw_utils", 
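parse_datasets binds dataset-specific arguments into the collate function with functools.partial before handing it to DataLoader. A toy standalone version of the same pattern, assuming torch is installed; the collate body here is made up:

from functools import partial
import torch
from torch.utils.data import DataLoader

def collate(batch, scale=1.0):
    # batch is a list of items; stack them and apply the bound extra argument
    return torch.stack([torch.as_tensor(b, dtype=torch.float32) for b in batch]) * scale

loader = DataLoader([[1.0], [2.0], [3.0]], batch_size=2,
                    collate_fn=partial(collate, scale=0.5))
print(next(iter(loader)))  # tensor([[0.5000], [1.0000]])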
"line_number": 45, "usage_type": "name"}, {"api_name": "magnify.latent_ode.drw_utils.get_data_min_max", "line_number": 55, "usage_type": "call"}, {"api_name": "magnify.latent_ode.drw_utils", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 60, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 64, "usage_type": "call"}, {"api_name": "magnify.latent_ode.drw_utils.variable_time_collate_fn", "line_number": 64, "usage_type": "attribute"}, {"api_name": "magnify.latent_ode.drw_utils", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 73, "usage_type": "call"}, {"api_name": "magnify.latent_ode.drw_utils.variable_time_collate_fn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "magnify.latent_ode.drw_utils", "line_number": 73, "usage_type": "name"}, {"api_name": "magnify.latent_ode.lib.utils.inf_generator", "line_number": 82, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 82, "usage_type": "name"}, {"api_name": "magnify.latent_ode.lib.utils.inf_generator", "line_number": 83, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.distributions.uniform.Uniform", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.distributions.uniform", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.Size", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 99, "usage_type": "call"}, {"api_name": "magnify.latent_ode.periodic_utils.Periodic_1d", "line_number": 105, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils.split_train_test", "line_number": 122, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 122, "usage_type": "name"}, {"api_name": "magnify.latent_ode.periodic_utils.Periodic1dDataset", "line_number": 123, "usage_type": "call"}, {"api_name": "magnify.latent_ode.periodic_utils.Periodic1dDataset", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 135, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils.inf_generator", "line_number": 141, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 141, "usage_type": "name"}, {"api_name": "magnify.latent_ode.lib.utils.inf_generator", "line_number": 142, "usage_type": "call"}, {"api_name": "magnify.latent_ode.lib.utils", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "271655211", "text": "import subprocess\nimport string\nimport random\nimport time\n\nimport yaml\nimport paramiko\n\n\ndef execute_command(\n command,\n working_directory,\n environment_variables,\n executor,\n logger,\n):\n logger_prefix = ''\n if executor:\n logger_prefix = executor + ': '\n\n process = subprocess.Popen(\n command,\n cwd=working_directory,\n env=environment_variables,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n logger.debug(logger_prefix + 'command: ' + command)\n\n stdout = ''\n for line in iter(process.stdout.readline, b''):\n line = 
str(line, 'utf-8')\n stdout += line\n logger.debug(logger_prefix + 'command output: ' + line.rstrip())\n\n return_code = process.wait()\n\n stdout = stdout.rstrip()\n\n return stdout, return_code\n\n\ndef execute_ssh_command(\n command,\n environment_variables,\n hostname,\n ssh_port,\n ssh_username,\n ssh_private_key_file,\n executor,\n logger,\n):\n logger_prefix = ''\n if executor:\n logger_prefix = executor + ': '\n\n logger.debug(logger_prefix + 'command: ' + command)\n\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n ssh_client.connect(\n hostname=hostname,\n port=ssh_port,\n username=ssh_username,\n key_filename=ssh_private_key_file,\n look_for_keys=False,\n )\n\n stdin, stdout, stderr = ssh_client.exec_command(\n command,\n environment=environment_variables,\n )\n\n stdout_string = ''\n for line in stdout:\n stdout_string += line\n logger.debug(logger_prefix + 'command stdout: ' + line.rstrip())\n stdout_string = stdout_string.rstrip()\n\n for line in stderr:\n logger.debug(logger_prefix + 'command stderr: ' + line.rstrip())\n\n return_code = stdout.channel.recv_exit_status()\n\n ssh_client.close()\n\n return stdout_string, return_code\n\n\ndef parse_yaml(yaml_file_path):\n with open(yaml_file_path, mode='r', encoding='utf-8') as yaml_file:\n content = yaml.load(yaml_file)\n return content\n\n\ndef generate_random_string(length):\n chars = string.digits + string.ascii_lowercase\n return ''.join(random.choice(chars) for _ in range(length))\n\n\ndef wait_until_ssh_ready(\n hostname,\n ssh_port,\n ssh_username,\n ssh_private_key_file,\n):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n while True:\n try:\n ssh_client.connect(\n hostname=hostname,\n port=ssh_port,\n username=ssh_username,\n key_filename=ssh_private_key_file,\n look_for_keys=False,\n timeout=2,\n )\n ssh_client.close()\n break\n except Exception as e:\n time.sleep(1)\n", "sub_path": "distributed_lsh_alessandro_de_palma/cCube/orchestrator-master/orchestrator/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 21, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "paramiko.SSHClient", "line_number": 61, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 62, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 95, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 100, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 100, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 101, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 110, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 111, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "256683277", "text": "from django.urls import path\nfrom ..views import product_views as views\n\n\n\n\nurlpatterns =[\n\n path('', views.getProducts, name='products'),\n path('create/', views.createProduct, name='create-product'),\n path('upload/', views.uploadImage, name='image-upload'),\n path('delete/selected/', 
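execute_command above streams the child's combined stdout/stderr line by line instead of waiting for the process to finish. The same pattern reduced to a standalone snippet (a POSIX shell is assumed for the echo commands):

import subprocess

proc = subprocess.Popen('echo one; echo two', shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
captured = ''
for raw in iter(proc.stdout.readline, b''):
    line = str(raw, 'utf-8')
    captured += line
    print('command output:', line.rstrip())
print('return code:', proc.wait())  # 0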
views.deleteSelectedProducts, name='delete-selected-product'),\n path('top/', views.getTopProducts, name='top-products'),\n\n path('/', views.getProduct, name='product'),\n path('reviews//', views.createProductReview, name='reviews'),\n path('update//', views.updateProduct, name='update-product'),\n path('delete//', views.deleteProduct, name='delete-product'),\n]", "sub_path": "api/urls/product_urls.py", "file_name": "product_urls.py", "file_ext": "py", "file_size_in_byte": 702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.product_views.getProducts", "line_number": 9, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.product_views.createProduct", "line_number": 10, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.product_views.uploadImage", "line_number": 11, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.product_views.deleteSelectedProducts", "line_number": 12, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.product_views.getTopProducts", "line_number": 13, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.product_views.getProduct", "line_number": 15, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.product_views.createProductReview", "line_number": 16, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.product_views.updateProduct", "line_number": 17, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.product_views.deleteProduct", "line_number": 18, "usage_type": "attribute"}, {"api_name": "views.product_views", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "153003243", "text": "import torch\nimport numpy as np\n\nfrom .rl_model.decider import DQN\nfrom .player import Player, Action\nfrom .cards import Card, Rank, Suit\n\nclass RLPlayer(Player):\n\tdef __init__(self, n_decks, *kargs, model_file=None, **kwargs):\n\t\tsuper().__init__(*kargs, name='Me (the computer)', **kwargs)\n\n\t\tself.n_decks = n_decks\n\t\tself.n_aces_left = None \n\t\tself.n_23or4_left = None \n\t\tself.n_56or7_left = None \n\t\tself.n_8or9_left = None \n\t\tself.n_10val_left = None \n\t\tself.n_total_left = None \n\t\tself.dealer_card = None\n\t\tself.shoe_shuffled()\n\n\t\t# init neural net\n\t\tself.use_pretrained = False\n\t\tif model_file:\n\t\t\tself.device 
= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\t\t\tself.policy_net = DQN().to(self.device).float()\n\t\t\tself.policy_net.load_state_dict(torch.load(model_file))\n\t\t\tself.policy_net.eval()\n\t\t\tself.use_pretrained = True\n\n\t@property\n\tdef current_state(self):\n\t\tn_decks_left = self.n_total_left / len(Suit) / len(Rank)\n\t\tdef n_left_per_deck(n_left):\n\t\t\treturn n_left / n_decks_left\n\t\tstate = (\n\t\t\tn_left_per_deck(self.n_aces_left),\n\t\t\tn_left_per_deck(self.n_23or4_left),\n\t\t\tn_left_per_deck(self.n_56or7_left),\n\t\t\tn_left_per_deck(self.n_8or9_left),\n\t\t\tn_left_per_deck(self.n_10val_left),\n\t\t\tself.hand.value,\n\t\t\tint(self.hand.is_soft),\n\t\t\tmin(self.dealer_card.value, 10),\n\t\t\tn_decks_left\n\t\t)\n\t\treturn state\n\n\t@property\n\tdef current_state_tensor(self):\n\t\treturn torch.from_numpy(np.array(self.current_state)).float().to(self.device)\n\n\tdef card_was_drawn(self, card):\n\t\tif card.rank == Rank.A:\n\t\t\tself.n_aces_left -= 1\n\t\telif 2 <= card.value <= 4:\n\t\t\tself.n_23or4_left -= 1\n\t\telif 5 <= card.value <= 7:\n\t\t\tself.n_56or7_left -= 1\n\t\telif 8 <= card.value <= 9:\n\t\t\tself.n_8or9_left -= 1\n\t\telse:\n\t\t\tself.n_10val_left -= 1\n\t\tself.n_total_left -= 1\n\n\tdef dealer_card_set(self, card):\n\t\tself.dealer_card = card\n\n\tdef shoe_shuffled(self):\n\t\tn_suits = len(Suit)\n\t\tn_ranks = len(Rank)\n\t\tself.n_aces_left = n_suits * self.n_decks\n\t\tself.n_23or4_left = n_suits * self.n_decks * 3\n\t\tself.n_56or7_left = n_suits * self.n_decks * 3\n\t\tself.n_8or9_left = n_suits * self.n_decks * 2\n\t\tself.n_10val_left = n_suits * self.n_decks * 4\n\t\tself.n_total_left = n_suits * self.n_decks * n_ranks\n\n\t\tself.dealer_card = None\n\n\tdef action(self):\n\t\tif not self.use_pretrained:\n\t\t\treturn Action.Hit\n\t\twith torch.no_grad():\n\t\t\tnet_result = self.policy_net(self.current_state_tensor).max(0)[1].view(1,1).item()\n\t\t\treturn Action(1+net_result)\n\n", "sub_path": "blackjack/model/rl_player.py", "file_name": "rl_player.py", "file_ext": "py", "file_size_in_byte": 2430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "player.Player", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rl_model.decider.DQN", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 27, "usage_type": "call"}, {"api_name": "cards.Suit", "line_number": 33, "usage_type": "argument"}, {"api_name": "cards.Rank", "line_number": 33, "usage_type": "argument"}, {"api_name": "torch.from_numpy", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "cards.Rank.A", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cards.Rank", "line_number": 54, "usage_type": "name"}, {"api_name": "cards.Suit", "line_number": 70, "usage_type": "argument"}, {"api_name": "cards.Rank", "line_number": 71, "usage_type": "argument"}, {"api_name": "player.Action.Hit", "line_number": 83, "usage_type": "attribute"}, {"api_name": "player.Action", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 84, "usage_type": "call"}, {"api_name": "player.Action", "line_number": 86, "usage_type": 
"call"}]} +{"seq_id": "193195975", "text": "\n# Creating a function dm(im,k) to show the k dominant colors in an image im.\n# Uses of this function: Display the dominant colors in an image\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport sys\nfrom PIL import Image\n\t\n\t\ndef KmeansClustering(im,k):\n\n\tim = im.resize((400,400),Image.ANTIALIAS)\n\tim = np.array(im)\n\th,w = 400,400\n\ttry:\n\t\tch = np.shape(im)[2]\n\texcept IndexError:\n\t\tch = 1\n\t# centeroids = np.random.randint(())\n\tim = im.reshape((w*h,ch))\n\tkmeans = KMeans(n_clusters=k)\n\tkmeans.fit(im)\n\n\n\treturn kmeans.cluster_centers_.astype('uint8')\n\ndef domColors(im,k):\n\t# Find the least square distance \n\n\tcolors = KmeansClustering(im,k)\n\tim = np.array(im)\n\n\ttry:\n\t\th,w,c = im.shape\n\texcept IndexError:\n\t\th,w = im.shape\n\n\n\tch = h//10\n\tcw = w//k\n\t\n\tnew_img = np.empty((h+ch,w,3))\n\tnew_img[0:h , 0:w, :] = im\n\t\n\tfor i,color in enumerate(colors):\n\n\t\tnew_img[h:h+ch,cw*i:cw*(i+1),:] = color[:]\n\t\t# print(new_img[h:h+200,cw*i:cw*(i+1),:],color,w+cw*i,w+cw*(i+1))\n\tnew_img = new_img.astype('uint8')\n\tplt.imshow(new_img)\n\tplt.axis('off')\n\tplt.title(\"Dominant Colors in the image\")\n\tplt.show()\n\n\treturn colors\n\n\n\nif __name__ == \"__main__\":\n\n\t# Get the input\n\tpath = sys.argv[1]\n\tk = int(sys.argv[2])\n\tim = Image.open(path)\n\n\t# Run the function\n\tdomColors(im,k)\n", "sub_path": "DIP/Assignment1/src/domColor.py", "file_name": "domColor.py", "file_ext": "py", "file_size_in_byte": 1300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PIL.Image.ANTIALIAS", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "193619600", "text": "import base64\nimport hashlib\nimport os\nimport qrcode\nimport re\nimport sys\nimport tkinter as Tk\nimport tkinter.filedialog, tkinter.messagebox\nimport tempfile\nfrom PIL import ImageTk\nimport zipfile\n\ndef makeSimpleQR( str, err_cor ) :\n qr = qrcode.QRCode( error_correction = err_cor, box_size = 2, border = 8)\n qr.add_data( str )\n qr.make()\n im = qr.make_image( fill_color = 
'black', back_color = 'white' )\n return [ ImageTk.PhotoImage( im ) ]\n\ndef outputQR( ix, qrHead, b64, fm, to, err_cor ) :\n print( '[ {}, {} ]'.format( fm, to ) )\n qr = qrcode.QRCode( error_correction = err_cor, box_size = 2, border = 8)\n qr.add_data( qrHead + b64[ fm : to ] )\n qr.make()\n im = qr.make_image( fill_color = 'black', back_color = 'white' )\n return ImageTk.PhotoImage( im )\n\nerrCorrTab = [\n ( qrcode.constants.ERROR_CORRECT_L, ( 2953, 'L (7%) 2,953 byte' ) ),\n ( qrcode.constants.ERROR_CORRECT_M,\n ( 2331, 'M (15%, default) 2,331 byte' ) ),\n ( qrcode.constants.ERROR_CORRECT_Q, ( 1663, 'Q (25%) 1,663 byte' ) ),\n ( qrcode.constants.ERROR_CORRECT_H, ( 1272, 'H (30%) 1,272 byte' ) ) ]\ndef makeQR( ifn, err_cor ) :\n global errCorrTab\n qrc = []\n with open( ifn, 'rb' ) as f :\n a = f.read()\n b64 = base64.b64encode( a ).decode( 'utf-8' )\n print( 'size = {}'.format( len( b64 ) ) )\n csiz = None\n for k, (s, _) in errCorrTab :\n if k == err_cor :\n csiz = s\n break\n basename = os.path.basename( ifn )\n csiz -= len( 'abcd:01:10:{}:'.format( basename ) )\n qrHash = hashlib.sha256( ( b64 + '{}'.format(err_cor) ).encode() ) \\\n .hexdigest()[0:4]\n last = ( len( b64 ) + csiz - 1 ) // csiz\n qrHeadFmt = qrHash + ':{:02}:' + '{:02}:{}:'.format( last, basename )\n\n for i in range( 0, len( b64 ) - csiz + 1, csiz ) :\n qrHead = qrHeadFmt.format( i // csiz )\n q = outputQR( i // csiz, qrHead, b64, i, i + csiz, err_cor )\n qrc.append( q )\n if len( b64 ) % csiz != 0 :\n ix = len( b64 ) // csiz\n qrHead = qrHeadFmt.format( last - 1 )\n q = outputQR( ix, qrHead, b64, ix * csiz, len( b64 ), err_cor )\n qrc.append( q )\n return qrc\n\nreg_qr = re.compile(\n r'([\\da-f][\\da-f][\\da-f][\\da-f]):(\\d\\d):(\\d\\d):([^:]+):([+/\\w]+=*)' )\ndef mergeBase64( ifn ) :\n global reg_qr\n with open( ifn, 'r' ) as f :\n hsh = None\n tl = None\n ofn = None\n cts = {}\n for line in f :\n line = re.sub( r'\\r?\\n$', '', line )\n m = reg_qr.search( line )\n if m == None :\n print( 'skip : ' + line )\n continue\n if hsh == None :\n hsh = m.group( 1 )\n tl = m.group( 3 )\n ofn = m.group( 4 )\n elif hsh != m.group( 1 ) or tl != m.group( 3 ) \\\n or ofn != m.group( 4 ) :\n continue\n cts[ str( int( m.group( 2 ) ) ) ] = m.group( 5 )\n sum = ''\n for i in range( int( tl ) ) :\n if not str( i ) in cts :\n sum = ''\n print( 'Detects lack parts : {}'.format( i ) )\n break\n sum += cts[ str( i ) ]\n if sum != '' :\n dir = os.path.dirname( ifn )\n with open( os.path.join( dir, ofn ), 'wb' ) as of :\n of.write( base64.b64decode( sum ) )\n\nbtn_fn = None\ntxt_fn = None\nbln_zip = None\nchk_zip = None\nopt_err_var = None\nstr_inMethod = None\ntxt_direct = None\nbtn_dec = None\nbtn_head = None\nbtn_next = None\nimg = None\nimg_no = 0\ncanvas = None\n\ndef file_btn_click() :\n global txt_fn\n fTyp = [ ('', '*') ]\n iDir = os.path.abspath( os.path.dirname( __file__ ) )\n ifn = Tk.filedialog.askopenfilename(\n filetypes = fTyp, initialdir = iDir)\n txt_fn.delete( 0, Tk.END )\n txt_fn.insert( Tk.END, ifn )\n\ndef disp_qr() :\n global canvas\n global img\n global img_no\n global txt_qrno\n global btn_head\n global btn_next\n canvas.create_image( 0, 0, image = img[ img_no ], anchor= Tk.NW )\n txt_qrno.set( '{} / 0 - {}'.format( img_no, len( img ) - 1 ) )\n if img_no == 0 :\n btn_head.configure( state = 'disabled' )\n else :\n btn_head.configure( state = 'normal' )\n if img_no + 1 == len( img ) :\n btn_next.configure( state = 'disabled' )\n else :\n btn_next.configure( state = 'normal' )\n\ndef next_btn_click() :\n global 
img\n global img_no\n if img_no + 1 != len( img ) :\n img_no += 1\n disp_qr()\n\ndef head_btn_click() :\n global img_no\n img_no = 0\n disp_qr()\n\ndef qrcode_btn_click() :\n global txt_fn\n global bln_zip\n global opt_err_var\n global errCorrTab\n global canvas\n global txt_qrno\n global str_inMethod\n global txt_direct\n global btn_head\n global btn_next\n global img\n\n val = None\n for ( v, ( s, d ) ) in errCorrTab :\n if opt_err_var.get() == d :\n val = v\n break\n img = []\n if str_inMethod.get() == 'text' :\n img = makeSimpleQR( txt_direct.get(), val )\n elif not bln_zip.get() :\n img = makeQR( txt_fn.get(), val )\n else :\n with tempfile.TemporaryDirectory() as tmpDn :\n tmpFn = os.path.join( tmpDn, 'portFile.zip' )\n with zipfile.ZipFile( tmpFn, 'w', zipfile.ZIP_DEFLATED ) as zipF :\n zipF.write( txt_fn.get(), os.path.basename( txt_fn.get() ) )\n img = makeQR( tmpFn, val )\n\n qrWin = Tk.Toplevel()\n qrWin.geometry( '385x425' )\n qrWin.title('QR code')\n btn_head = Tk.Button( qrWin, text='<<', command= head_btn_click )\n btn_head.place( x=5, y=5 )\n\n txt_qrno = Tk.StringVar()\n txt_qrno.set( '' )\n lbl_qrno = Tk.Label( qrWin, textvariable= txt_qrno )\n lbl_qrno.place( x=100, y=5 )\n\n btn_next = Tk.Button( qrWin, text='>', command= next_btn_click )\n btn_next.place( x=190, y=5 )\n canvas = Tk.Canvas( qrWin, bg = 'white', width= 385, height= 385 )\n canvas.place( x = 0, y = 40 )\n head_btn_click()\n qrWin.mainloop()\n\ndef decode_btn_click() :\n global txt_fn\n mergeBase64( txt_fn.get() )\n\ndef inMethChange() :\n global str_inMethod\n global btn_fn\n global txt_fn\n global chk_zip\n global btn_dec\n global txt_direct\n if str_inMethod.get() == 'text' :\n btn_fn.configure( state = 'disabled' )\n txt_fn.configure( state = 'disabled' )\n chk_zip.configure( state = 'disabled' )\n btn_dec.configure( state = 'disabled' )\n txt_direct.configure( state = 'normal' )\n else :\n btn_fn.configure( state = 'normal' )\n txt_fn.configure( state = 'normal' )\n chk_zip.configure( state = 'normal' )\n btn_dec.configure( state = 'normal' )\n txt_direct.configure( state = 'disabled' )\n\ndef gui() :\n global btn_fn\n global txt_fn\n global bln_zip\n global chk_zip\n global opt_err_var\n global errCorrTab\n global str_inMethod\n global txt_direct\n global btn_dec\n\n root = Tk.Tk()\n root.geometry( '420x200' )\n root.title('Any File to QRcodes ')\n\n frm_base = Tk.Frame( root, relief = 'flat' )\n frm_base.pack()\n\n frm_in = Tk.LabelFrame( frm_base, text = 'Input' )\n frm_in.pack( fill = Tk.X )\n frm_out = Tk.LabelFrame( frm_base, text = 'Output' )\n frm_out.pack( side = Tk.LEFT )\n frm_out_qr = Tk.LabelFrame( frm_out, text = 'QR Code' )\n frm_out_qr.pack( side = Tk.LEFT )\n frm_out_dec = Tk.LabelFrame( frm_out, text = 'Decode base64' )\n frm_out_dec.pack( side = Tk.LEFT )\n\n frm_in_file = Tk.Frame( frm_in, relief = 'flat' )\n frm_in_file.pack( side = Tk.TOP )\n frm_in_zip = Tk.Frame( frm_in, relief = 'flat' )\n frm_in_zip.pack( side = Tk.TOP )\n frm_in_txt = Tk.Frame( frm_in, relief = 'flat' )\n frm_in_txt.pack( side = Tk.TOP )\n\n str_inMethod = Tk.StringVar()\n str_inMethod.set( 'file' )\n\n rad_fn = Tk.Radiobutton( frm_in_file, text='File Name',\n variable = str_inMethod, value = 'file', command = inMethChange )\n rad_fn.pack( side = Tk.LEFT )\n txt_fn = Tk.Entry( frm_in_file, width = 40 )\n txt_fn.pack( side = Tk.LEFT )\n btn_fn = Tk.Button( frm_in_file, text='Open...', command= file_btn_click )\n btn_fn.pack( side = Tk.LEFT )\n\n bln_zip = Tk.BooleanVar()\n bln_zip.set( False )\n chk_zip = 
Tk.Checkbutton( frm_in_zip, variable= bln_zip,\n text='with ZIP compression' )\n chk_zip.pack( side = Tk.LEFT )\n\n rad_direct = Tk.Radiobutton( frm_in_txt, text='Direct Text',\n variable = str_inMethod, value = 'text', command = inMethChange )\n rad_direct.pack( side = Tk.LEFT )\n txt_direct = Tk.Entry( frm_in_txt, width = 48 )\n txt_direct.pack( side = Tk.LEFT )\n\n frm_err = Tk.Frame( frm_out_qr, relief = 'flat' )\n frm_err.pack( side = Tk.TOP )\n lbl_fm = Tk.Label( frm_err, text='Error Correct' )\n lbl_fm.pack( side = Tk.LEFT )\n opt_err_var = Tk.StringVar( root )\n OptionList = []\n for _, attr in errCorrTab :\n _, desc = attr\n OptionList.append( desc )\n opt_err_var.set( OptionList[ 0 ] )\n opt_err = Tk.OptionMenu( frm_err, opt_err_var, *OptionList )\n opt_err.config( width= 25 )\n opt_err.pack()\n opt_err.pack( side = Tk.LEFT )\n\n btn = Tk.Button( frm_out_qr,\n text='Display QR codes', command= qrcode_btn_click )\n btn.pack( side = Tk.TOP )\n\n btn_dec = Tk.Button( frm_out_dec,\n text='Output Decoded file', command= decode_btn_click )\n btn_dec.pack( side = Tk.LEFT )\n\n inMethChange()\n root.mainloop()\n\nif __name__ == '__main__' :\n gui()\n", "sub_path": "portFile.py", "file_name": "portFile.py", "file_ext": "py", "file_size_in_byte": 9732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "qrcode.QRCode", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 18, "usage_type": "name"}, {"api_name": "qrcode.QRCode", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 26, "usage_type": "name"}, {"api_name": "qrcode.constants", "line_number": 29, "usage_type": "attribute"}, {"api_name": "qrcode.constants", "line_number": 30, "usage_type": "attribute"}, {"api_name": "qrcode.constants", "line_number": 32, "usage_type": "attribute"}, {"api_name": "qrcode.constants", "line_number": 33, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 48, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 129, "usage_type": 
"attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 178, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "tkinter.Toplevel", "line_number": 182, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 185, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 188, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 190, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 193, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 195, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 235, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 239, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 242, "usage_type": "call"}, {"api_name": "tkinter.X", "line_number": 243, "usage_type": "attribute"}, {"api_name": "tkinter.LabelFrame", "line_number": 244, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 245, "usage_type": "attribute"}, {"api_name": "tkinter.LabelFrame", "line_number": 246, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 247, "usage_type": "attribute"}, {"api_name": "tkinter.LabelFrame", "line_number": 248, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 249, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 251, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 252, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 253, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 254, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 255, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 258, "usage_type": "call"}, {"api_name": "tkinter.Radiobutton", "line_number": 261, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 263, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 264, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 266, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 267, "usage_type": "attribute"}, {"api_name": "tkinter.BooleanVar", "line_number": 269, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 271, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 273, "usage_type": "attribute"}, {"api_name": "tkinter.Radiobutton", "line_number": 275, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 277, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 278, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 279, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 281, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 282, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 283, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 284, 
"usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 285, "usage_type": "call"}, {"api_name": "tkinter.OptionMenu", "line_number": 291, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 294, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 296, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 298, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 300, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 302, "usage_type": "attribute"}]} +{"seq_id": "518579859", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 60\n\npop_iter = np.zeros((2, N))\n\nx1 = .87\ny1 = .64/.87\nz1 = .000\nx2 = .46\ny2 = .07/.46\nz2 = .000\n\nbr1 = .85\n\n\ndef transfer(pop, x1, y1, z1, x2, y2, z2, br1, axes, clr1, clr2):\n\n br2 = 1 - br1\n A = np.array([[1 - (z1 + x1*y1)*br2, (x2*y2 + z2)*br1], [(x1*y1 + z1)*br2, 1 - (z2 + x2*y2)*br1]])\n\n for i in range(N):\n pop_iter[0, i] = pop[0]\n pop_iter[1, i] = pop[1]\n\n pop = A.dot(pop)\n\n axes.plot(pop_iter[0], clr1)\n axes.plot(pop_iter[1], clr2)\n\n print(pop)\n\n\ndef transfer_direct(pop, x1, x2, br1, axes, clr1, clr2):\n\n br2 = 1 - br1\n\n A = np.array([[1 - x1*br2, x2*br1], [x1*br2, 1 - x2*br1]])\n\n for i in range(N):\n pop_iter[0, i] = pop[0]\n pop_iter[1, i] = pop[1]\n\n pop = A.dot(pop)\n\n axes.plot(pop_iter[0], clr1, linewidth=2.)\n axes.plot(pop_iter[1], clr2, linewidth=2.)\n\n print(pop)\n\n\npop = np.array([1., 0.])\nfig, axes = plt.subplots(nrows=1, ncols=1)\ntransfer(pop, .4, .288/.4, .000156, .03636, .02/.03636, .000166, .7, axes, 'r', 'b')\ntransfer(pop, .9, .9, .00015, .1, .5, .00016, .7, axes, 'r*-', 'b*-')\npop = np.array([1., 0.])\ntransfer_direct(pop, .748, .041, .7, axes, 'r--', 'b--')\n\naxes.set_xlabel(\"Number of iterations\")\nplt.show()", "sub_path": "RamanControl_NLOPT_C/SingleFieldRamanAssisted/5level/Transfer.py", "file_name": "Transfer.py", "file_ext": "py", "file_size_in_byte": 1276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "513110372", "text": "\"\"\"This module implements class that represents the user profile entity.\"\"\"\n\nfrom django.db import models, IntegrityError, transaction\nfrom django.db.utils import OperationalError\nfrom custom_user.models import CustomUser\nfrom utils.abstract_models import AbstractModel\nfrom utils.loggerhelper import LOGGER\n\n\nclass UserProfile(AbstractModel):\n \"\"\"Model for user profile entity.\"\"\"\n user = models.OneToOneField(CustomUser, on_delete=models.CASCADE, related_name='user_profile')\n first_name = models.CharField(max_length=64, blank=True)\n last_name = models.CharField(max_length=64, blank=True)\n telegram_id = models.IntegerField(null=True)\n\n def __str__(self):\n \"\"\"Method that returns route instance 
as string.\"\"\"\n        return f'{self.first_name} {self.last_name}'\n\n    def to_dict(self):\n        \"\"\"Method that returns dict with object's attributes.\"\"\"\n        return {\n            'id': self.id,\n            'first_name': self.first_name,\n            'last_name': self.last_name,\n            'user_id': self.user.id,\n            'telegram_id': self.telegram_id\n        }\n\n    @classmethod\n    def create(cls, user, first_name='', last_name='', telegram_id=None):  # pylint: disable=arguments-differ\n        \"\"\"Method for object creation.\"\"\"\n        user_profile = cls()\n        user_profile.first_name = first_name\n        user_profile.last_name = last_name\n        user_profile.telegram_id = telegram_id\n\n        try:\n            user_profile.user = user\n            user_profile.save()\n            return user_profile\n        except (ValueError, IntegrityError, OperationalError) as err:\n            LOGGER.error(f'Unsuccessful user profile creation. {err}')\n\n    def update(self, first_name=None, last_name=None, telegram_id=''):  # pylint: disable=arguments-differ\n        \"\"\"Method for updating of user profile object\"\"\"\n        with transaction.atomic():\n            if first_name:\n                self.first_name = first_name\n            if last_name:\n                self.last_name = last_name\n            if telegram_id != '':\n                self.telegram_id = telegram_id\n            try:\n                self.save()\n                return True\n            except (ValueError, OperationalError) as err:\n                LOGGER.error(f'Unsuccessful update of profile parameters with id={self.id}. {err}')\n                return False\n\n    @classmethod\n    def get_by_telegram_id(cls, telegram_id):\n        \"\"\"Method for finding profile with given telegram_id\"\"\"\n        try:\n            return cls.objects.get(telegram_id=telegram_id)\n        except (ValueError, cls.DoesNotExist, OperationalError) as err:\n            LOGGER.error(f'Failed to find profile by telegram id={telegram_id} {err}')\n", "sub_path": "way_to_home/user_profile/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.abstract_models.AbstractModel", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 12, "usage_type": "call"}, {"api_name": "custom_user.models.CustomUser", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.utils.OperationalError", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.loggerhelper.LOGGER.error", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.loggerhelper.LOGGER", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.utils.OperationalError", "line_number": 58, "usage_type": "name"}, {"api_name": "utils.loggerhelper.LOGGER.error", "line_number": 59, "usage_type": "call"}, {"api_name": 
"utils.loggerhelper.LOGGER", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.utils.OperationalError", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.loggerhelper.LOGGER.error", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.loggerhelper.LOGGER", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "244031945", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/darcscgi/lib/helpers.py\n# Compiled at: 2009-09-11 13:58:44\n\"\"\"Helper functions\n\nConsists of functions to typically be used within templates, but also\navailable to Controllers. This module is available to templates as 'h'.\n\"\"\"\nfrom webhelpers.html.tags import stylesheet_link, javascript_link\nfrom routes import url_for\nimport os, time, subprocess, operator, email, pyme.core\nfrom pyme.constants import protocol, status, sigsum, validity\n\ndef filelisting(path, column=0, order=False):\n    \"\"\" given a path return a sorted list of tuples (file name, file size in bytes, container directory, pretty mtime, mtime, full path)\n    \"\"\"\n    filenames = []\n    for directory_tuple in os.walk(path):\n        for file_entry in directory_tuple[2]:\n            fullpath = os.path.join(directory_tuple[0], file_entry)\n            container = os.path.split(directory_tuple[0])[1]\n            if not os.path.islink(fullpath):\n                pretty_mtime = time.strftime('%Y-%d-%m %H:%M', time.localtime(os.path.getmtime(fullpath)))\n                mtime = int(os.path.getmtime(fullpath))\n                fsize = os.path.getsize(fullpath)\n                filenames.append((file_entry, fsize, container, pretty_mtime, mtime, fullpath))\n\n    return sorted(filenames, key=operator.itemgetter(column), reverse=order)\n\n\ndef safefile(repository, path):\n    \"\"\" given a repository path and a relative attempted access path:\n    strips the leading slash, joins, normalizes, and in the case of symlinks\n    determines the actual location of the attempted access path\n    if the file is not a symlink and exists below the repository path, returns the calculated path\n    else return None (which evaluates to false)\n    \"\"\"\n    path = path.lstrip('/\\\\')\n    path = os.path.join(repository, path)\n    path = os.path.realpath(os.path.abspath(path))\n    if os.path.isfile(path) and not os.path.islink(path):\n        if os.path.commonprefix([repository, path]) == repository:\n            return path\n        else:\n            return\n    else:\n        return\n    return\n\n\ndef decodeMessage(messageString):\n    \"\"\" given message/rfc822 returns the content\n    \"\"\"\n    package = email.message_from_string(messageString)\n    message = ''\n    for part in package.walk():\n        if not part.is_multipart():\n            message += part.get_payload(decode=True)\n\n    return message\n\n\ndef verify_patch(keyring_dirpath, keyring_basepath, input, required_trust, get_output=False, unicode=True):\n    \"\"\" verifies a patch and returns results in a tuple\n    input: keyring_dirpath, keyring_basepath: safely joined to form the keyring homedir\n    input: input message to verify\n    get_output: return verified output, or None\n    unicode: convert input message from utf-8 to local encoding\n    output, a tuple:\n    return_bool: true if verified, false otherwise\n    return_string: accumulated string describing operation\n    verified output: optional, verified message or None\n    \"\"\"\n    return_bool, return_string, verified_output = False, '', None\n    keyring_basepath = keyring_basepath.lstrip('/\\\\')\n    keyring_homedir = os.path.join(keyring_dirpath, keyring_basepath)\n    if not os.path.exists(keyring_homedir):\n        
os.makedirs(keyring_homedir)\n    for engine in pyme.core.get_engine_info():\n        if engine.protocol == protocol.OpenPGP:\n            gpg_executable = engine.file_name\n\n    context = pyme.core.Context()\n    context.set_engine_info(protocol.OpenPGP, gpg_executable, keyring_homedir.encode('utf-8'))\n    if unicode:\n        signature = pyme.core.Data(string=input.encode('utf-8'))\n    else:\n        signature = pyme.core.Data(string=input)\n    plain = pyme.core.Data()\n    try:\n        context.op_verify(signature, None, plain)\n    except pyme.errors.GPGMEError:\n        return_string += 'Patch is not signed, or improperly signed\\n'\n    else:\n        result = context.op_verify_result()\n        if len(result.signatures) != 1:\n            return_string += 'Expected clearsigned document must have one signature\\n'\n        else:\n            signature = result.signatures[0]\n            if signature.status == status.EOF:\n                try:\n                    current_trust = context.get_key(signature.fpr, 0).owner_trust\n                except pyme.errors.GPGMEError:\n                    current_trust = 0\n                else:\n                    if current_trust < required_trust:\n                        return_string += 'Signature does not have sufficient trust\\n'\n                    else:\n                        return_bool = True\n                        return_string += 'Signature successfully verified\\n'\n                        if get_output:\n                            plain.seek(0, 0)\n                            verified_output = plain.read()\n            else:\n                return_string += match_signature_status(signature.summary)\n\n    if get_output:\n        return (return_bool, return_string, verified_output)\n    else:\n        return (\n         return_bool, return_string)\n    return\n\n\ndef match_signature_status(summary):\n    \"\"\" performs bitwise matching between the gpgme_sigsum_t summary vector\n    and the GPGME_SIGSUM_* constants to determine the OpenPGP verification\n    status\n    outputs the results as a formatted string\n    \"\"\"\n    message = 'OpenPGP warnings:\\n'\n    codes = {sigsum.SYS_ERROR: 'SYS_ERROR' + ': system error occurred', sigsum.BAD_POLICY: 'BAD_POLICY' + ': policy requirement not met', \n       sigsum.CRL_TOO_OLD: 'CRL_TOO_OLD' + ': certificate revocation list too old', \n       sigsum.CRL_MISSING: 'CRL_MISSING' + ': no revocation mechanism available', \n       sigsum.KEY_MISSING: 'KEY_MISSING' + ': no matching key/certificate', \n       sigsum.SIG_EXPIRED: 'SIG_EXPIRED' + ': signature has expired', \n       sigsum.KEY_EXPIRED: 'KEY_EXPIRED' + ': key/certificate has expired', \n       sigsum.KEY_REVOKED: 'KEY_REVOKED' + ': key/certificate has been revoked', \n       sigsum.RED: 'RED' + ': signature is bad', \n       sigsum.GREEN: 'GREEN' + ': signature is fully valid', \n       sigsum.VALID: 'VALID' + ': signature is valid'}\n    for bit_location in codes:\n        if bit_location & summary == bit_location:\n            message += ' ' + codes[bit_location] + '\\n'\n\n    message = message.rstrip(', ') + '\\n'\n    return message\n\n\ndef apply_patch(repository_name, repository_path, patch, command, command_options):\n    \"\"\" generator object. Given the necessary data, applies a darcs patch.\n    Yields output throughout the process to keep the user informed in case of exponential merges\n    \"\"\"\n    name = repository_name.lstrip('/\\\\')\n    name = name + '.dpatch.' 
+ time.strftime('%Y.%d.%m.%H.%M.%S', time.localtime())\n    path = os.path.join(repository_path, name)\n    try:\n        patchFile = open(path, 'w')\n        try:\n            patchFile.write(patch)\n        finally:\n            patchFile.close()\n\n    except IOError:\n        yield 'File open failed ::\\n'\n    else:\n        command.append('apply')\n        for arg in command_options.split(' '):\n            if arg != '':\n                command.append(arg)\n\n        command.append(path.encode('utf-8'))\n        yield 'Using the following command:'\n        for item in command:\n            yield ' ' + item\n\n        yield '\\n'\n        retproc = subprocess.Popen(command, cwd=repository_path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        proc_communicate = retproc.communicate()\n        yield proc_communicate[0]\n        os.remove(path)\n\n\ndef quarantine_prune(repository_name, quarantine_path, max_patches, max_size, patch_length):\n    \"\"\" Removes extra patches on a per-repository basis based on information from parameters\n    Several quirks:\n    1) will remove a patch in anticipation of a new patch. If subsequent actions fail,\n    an extra patch will be removed\n    2) possible to upload a large patch to flush all other patches from the quarantine\n    Returns None for successful operation or an error message\n    \"\"\"\n    if patch_length <= 0:\n        return 'Not accepting empty patches\\n'\n    else:\n        quarantine_path = os.path.join(quarantine_path, repository_name.lstrip('/\\\\'))\n        quarantine_list = filelisting(quarantine_path, 4)\n        while len(quarantine_list) > 0 and len(quarantine_list) >= max_patches:\n            os.remove(quarantine_list[0][5])\n            del quarantine_list[0]\n\n        if len(quarantine_list) >= max_patches:\n            return 'Could not allocate resources to quarantine patch\\n'\n        total_size = sum(map(operator.itemgetter(1), quarantine_list))\n        total_size = total_size + patch_length\n        while len(quarantine_list) > 0 and total_size > max_size:\n            total_size -= quarantine_list[0][1]\n            os.remove(quarantine_list[0][5])\n            del quarantine_list[0]\n\n        if total_size > max_size:\n            return 'Could not allocate resources to quarantine patch\\n'\n        return\n    return\n\n\ndef quarantine(repository_name, quarantine_path, patch):\n    \"\"\" Generator object. Places the darcs patch in the quarantine\n    Yields output throughout the process\n    \"\"\"\n    quarantine_path = os.path.join(quarantine_path, repository_name.lstrip('/\\\\'))\n    name = 'dpatch.' 
+ time.strftime('%Y-%d-%m.%H-%M-%S', time.localtime())\n if not os.path.isdir(quarantine_path):\n os.makedirs(quarantine_path)\n quarantine_file = os.path.join(quarantine_path, name)\n try:\n patchFile = open(quarantine_file, 'w')\n try:\n patchFile.write(patch)\n yield 'Identity not verified, patch written to quarantine\\n'\n finally:\n patchFile.close()\n\n except IOError:\n yield 'File open failed ::\\n'", "sub_path": "pycfiles/darcs_cgi-0.01.001dev-py2.6/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 9946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.islink", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 26, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.getmtime", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.islink", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.commonprefix", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "email.message_from_string", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 81, "usage_type": "call"}, {"api_name": "pyme.core.core.get_engine_info", "line_number": 82, "usage_type": "call"}, {"api_name": "pyme.core.core", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 82, "usage_type": "name"}, {"api_name": "pyme.constants.protocol.OpenPGP", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pyme.constants.protocol", "line_number": 83, "usage_type": "name"}, {"api_name": "pyme.core.core.Context", "line_number": 86, "usage_type": "call"}, {"api_name": "pyme.core.core", "line_number": 86, "usage_type": "attribute"}, 
{"api_name": "pyme.core", "line_number": 86, "usage_type": "name"}, {"api_name": "pyme.constants.protocol.OpenPGP", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pyme.constants.protocol", "line_number": 87, "usage_type": "name"}, {"api_name": "pyme.core.core.Data", "line_number": 89, "usage_type": "call"}, {"api_name": "pyme.core.core", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 89, "usage_type": "name"}, {"api_name": "pyme.core.core.Data", "line_number": 91, "usage_type": "call"}, {"api_name": "pyme.core.core", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 91, "usage_type": "name"}, {"api_name": "pyme.core.core.Data", "line_number": 92, "usage_type": "call"}, {"api_name": "pyme.core.core", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 92, "usage_type": "name"}, {"api_name": "pyme.core.errors", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 95, "usage_type": "name"}, {"api_name": "pyme.constants.status.EOF", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pyme.constants.status", "line_number": 103, "usage_type": "name"}, {"api_name": "pyme.core.errors", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pyme.core", "line_number": 106, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.SYS_ERROR", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 135, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.BAD_POLICY", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum.CRL_TOO_OLD", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 136, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.CRL_MISSING", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 137, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.KEY_MISSING", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 138, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.SIG_EXPIRED", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 139, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.KEY_EXPIRED", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 140, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.KEY_REVOKED", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 141, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.RED", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 142, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.GREEN", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 143, "usage_type": "name"}, {"api_name": "pyme.constants.sigsum.VALID", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pyme.constants.sigsum", "line_number": 144, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 158, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": 
"subprocess.Popen", "line_number": 181, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 181, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 201, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 206, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 224, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}]} +{"seq_id": "486501084", "text": "# -*- coding: utf-8 -*-\n\nimport json\nfrom django.http import HttpResponse\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\n\nfrom misc.decorators import staff_required, common_ajax_response, verify_permission\nfrom common import utils, page\n\nfrom www.admin.interface import PermissionBase\nfrom www.account.interface import UserBase\n\n\n@verify_permission('')\ndef permission(request, template_name='pc/admin/permission.html'):\n    permissions = PermissionBase().get_all_permissions()\n    return render_to_response(template_name, locals(), context_instance=RequestContext(request))\n\n\n@verify_permission('query_user_permission')\ndef get_all_administrators(request):\n    '''\n    Get all administrators\n    '''\n    num = 0\n    data = []\n\n    for x in PermissionBase().get_all_administrators():\n        num += 1\n        data.append({\n            'num': num,\n            'user_id': x.id,\n            'user_nick': x.nick,\n            'user_avatar': x.get_avatar_65()\n        })\n\n    return HttpResponse(json.dumps(data), mimetype='application/json')\n\n\n@verify_permission('query_user_permission')\ndef get_user_permissions(request):\n    '''\n    Get the permissions of the given user\n    '''\n    user_id = request.REQUEST.get('user_id')\n    data = PermissionBase().get_user_permissions(user_id)\n    user = UserBase().get_user_by_id(user_id)\n    return HttpResponse(json.dumps({'permissions': data, 'user': {'user_id': user.id, 'user_nick': user.nick}}), mimetype='application/json')\n\n\n@verify_permission('modify_user_permission')\n@common_ajax_response\ndef save_user_permission(request):\n    '''\n    Save user permissions\n    '''\n    user_id = request.REQUEST.get('user_id')\n    permissions = request.REQUEST.getlist('permissions')\n\n    return PermissionBase().save_user_permission(user_id, permissions, request.user.id)\n\n\n@verify_permission('cancel_admin')\n@common_ajax_response\ndef cancel_admin(request):\n    '''\n    Revoke administrator status for a user\n    '''\n    user_id = request.REQUEST.get('user_id')\n\n    return PermissionBase().cancel_admin(user_id)\n", "sub_path": "www/admin/views_permission.py", "file_name": "views_permission.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "www.admin.interface.PermissionBase", 
"line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 18, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 18, "usage_type": "call"}, {"api_name": "misc.decorators.verify_permission", "line_number": 15, "usage_type": "call"}, {"api_name": "www.admin.interface.PermissionBase", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "misc.decorators.verify_permission", "line_number": 21, "usage_type": "call"}, {"api_name": "www.admin.interface.PermissionBase", "line_number": 47, "usage_type": "call"}, {"api_name": "www.account.interface.UserBase", "line_number": 48, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "misc.decorators.verify_permission", "line_number": 41, "usage_type": "call"}, {"api_name": "www.admin.interface.PermissionBase", "line_number": 61, "usage_type": "call"}, {"api_name": "misc.decorators.verify_permission", "line_number": 52, "usage_type": "call"}, {"api_name": "misc.decorators.common_ajax_response", "line_number": 53, "usage_type": "name"}, {"api_name": "www.admin.interface.PermissionBase", "line_number": 72, "usage_type": "call"}, {"api_name": "misc.decorators.verify_permission", "line_number": 64, "usage_type": "call"}, {"api_name": "misc.decorators.common_ajax_response", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "393877843", "text": "#!/usr/bin/env python\n\"\"\"\nMdown CLI\n\nFront end CLI that allows the batch conversion of\nmarkdown files to HTML. 
Also accepts an input stream\nfor piping markdown.\n\nLicensed under MIT\nCopyright (c) 2014 Isaac Muse \n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom mdown import Mdown, Mdowns, load_text_resource\nfrom os.path import dirname, abspath, normpath, exists\nfrom os.path import isfile, isdir, splitext, join, basename\nimport sys\nfrom file_strip.json import sanitize_json\nimport json\nimport subprocess\nimport webbrowser\nimport traceback\nimport tempfile\nimport codecs\nimport re\n\n__version_info__ = (0, 3, 1)\n__version__ = '.'.join(map(str, __version_info__))\n\nif sys.platform.startswith('win'):\n    _PLATFORM = \"windows\"\nelif sys.platform == \"darwin\":\n    _PLATFORM = \"osx\"\nelse:\n    _PLATFORM = \"linux\"\n\nCRITIC_IGNORE = 0\nCRITIC_VIEW = 1\nCRITIC_DUMP = 2\n\nCRITIC_OPT_MAP = {\n    CRITIC_IGNORE: \"ignore\",\n    CRITIC_VIEW: \"view\",\n    CRITIC_DUMP: \"ignore\"\n}\n\n\nclass Logger(object):\n    \"\"\" Log messages \"\"\"\n    quiet = False\n\n    @classmethod\n    def log(cls, msg):\n        \"\"\" Log if not quiet \"\"\"\n\n        if not cls.quiet:\n            print(msg)\n\n\nclass CriticDump(object):\n    RE_CRITIC = re.compile(\n        r'''\n            ((?P<open>\{)\n            (?:\n                (?P<ins_open>\+{2})(?P<ins_text>.*?)(?P<ins_close>\+{2})\n              | (?P<del_open>\-{2})(?P<del_text>.*?)(?P<del_close>\-{2})\n              | (?P<mark_open>\={2})(?P<mark_text>.*?)(?P<mark_close>\={2})\n              | (?P<com_open>(?P<com_start>\>{2})(?P<com_text>.*?)(?P<com_end>\<{2}))\n              | (?P<sub_open>\~{2})(?P<sub_del_text>.*?)(?P<sub_mid>\~\>)(?P<sub_ins_text>.*?)(?P<sub_close>\~{2})\n            )\n            (?P<close>\})|.)\n        ''',\n        re.MULTILINE | re.DOTALL | re.VERBOSE\n    )\n\n    def process(self, m):\n        if self.accept:\n            if m.group('ins_open'):\n                return m.group('ins_text')\n            elif m.group('del_open'):\n                return ''\n            elif m.group('mark_open'):\n                return m.group('mark_text')\n            elif m.group('com_open'):\n                return ''\n            elif m.group('sub_open'):\n                return m.group('sub_ins_text')\n            else:\n                return m.group(0)\n        else:\n            if m.group('ins_open'):\n                return ''\n            elif m.group('del_open'):\n                return m.group('del_text')\n            elif m.group('mark_open'):\n                return m.group('mark_text')\n            elif m.group('com_open'):\n                return ''\n            elif m.group('sub_open'):\n                return m.group('sub_del_text')\n            else:\n                return m.group(0)\n\n    def dump(self, source, accept):\n        \"\"\" Walk the source, accepting or rejecting critic marks. 
\"\"\"\n        text = ''\n        self.accept = accept\n        for m in self.RE_CRITIC.finditer(source):\n            text += self.process(m)\n        return text\n\n\ndef get_settings(file_name, preview, critic_mode, reject):\n    \"\"\"\n    Get the settings and add absolutepath\n    extension if a preview is planned.\n    Unpack the settings file if needed.\n    \"\"\"\n\n    # Use default file if one was not provided\n    if file_name is None or not exists(file_name):\n        file_name = join(script_path, \"mdown.json\")\n\n    # Unpack default settings file if needed\n    if not exists(file_name):\n        text = load_text_resource(\"mdown.json\")\n        try:\n            with codecs.open(file_name, \"w\", encoding=\"utf-8\") as f:\n                f.write(text)\n        except:\n            print(traceback.format_exc())\n            pass\n\n    # Try and read settings file\n    settings = {}\n    try:\n        with open(file_name, \"r\") as f:\n            settings = json.loads(sanitize_json(f.read()))\n    except:\n        # print(traceback.format_exc())\n        pass\n\n    absolute = False\n    critic_found = []\n    extensions = settings.get(\"extensions\", [])\n    for i in range(0, len(extensions)):\n        name = extensions[i]\n        if name.startswith(\"mdownx.absolutepath\"):\n            absolute = True\n        if name.startswith(\"critic\"):\n            critic_found.append(i)\n\n    # Ensure the user can never set critic mode\n    for index in reversed(critic_found):\n        del extensions[index]\n\n    # Ensure previews are using absolute paths\n    if preview and not absolute:\n        extensions.append(\"mdownx.absolutepath(base_path=${BASE_PATH})\")\n    settings[\"extensions\"] = extensions\n\n    # Handle the appropriate critic mode internally\n    # Critic must be appended to end of extension list\n    extensions.append(\n        \"mdownx.critic(mode=%s,accept=%s)\" % (\n            CRITIC_OPT_MAP[critic_mode], not reject\n        )\n    )\n\n    return settings\n\n\ndef get_title(md_file, title_val, is_stream):\n    \"\"\" Get title for HTML \"\"\"\n    if title_val is not None:\n        title = title_val\n    elif not is_stream:\n        title = basename(abspath(md_file))\n    else:\n        title = None\n    return title\n\n\ndef get_output(md_file, index, output_val, terminal, is_stream, critic_mode, reject):\n    \"\"\"\n    Get the path to output the file.\n    If doing multiple files and pointing to a directory,\n    it will convert the current file name to an HTML filename\n    in that directory.\n\n    If doing multiple files and not pointing to a directory,\n    it will use ${count} to have different file names. 
If you\n    forget this, it will rewrite the same file over and over.\n\n    If doing a single file, and pointing to a directory, it will\n    convert the current file name to an HTML filename in that directory.\n\n    If doing a single file and not pointing to a directory, it will\n    use the file path given.\n\n    If doing a stream and pointing to a directory, the output will be\n    streamed to the terminal (stdout).\n\n    If doing a stream and not pointing to a directory, the output will\n    be streamed to that file path.\n\n    If mdown fails to create the output or none of these conditions are met,\n    the output will default to the terminal.\n    \"\"\"\n\n    if terminal:\n        # We want stdout\n        output = None\n    elif output_val is not None and output_val != \"\":\n        # Output is specified\n        name = abspath(output_val)\n        if exists(name) and isdir(name):\n            # It's a directory\n            if not is_stream:\n                # Use path and own name\n                if critic_mode is not CRITIC_DUMP:\n                    output = join(name, \"%s.html\" % splitext(abspath(md_file))[0])\n                else:\n                    if reject:\n                        label = \"(rejected)\"\n                    else:\n                        label = \"(accepted)\"\n                    base, ext = splitext(abspath(md_file))\n                    output = join(name, \"%s%s%s\" % (base, label, ext))\n            else:\n                # Stream: don't know what the file should be called\n                output = None\n        else:\n            # Apply multi-pattern to name\n            output = name.replace(\"${count}\", index)\n    elif not is_stream:\n        if critic_mode is not CRITIC_DUMP:\n            # Single or multi file: use own name\n            output = \"%s.html\" % splitext(abspath(md_file))[0]\n        else:\n            if reject:\n                label = \"(rejected)\"\n            else:\n                label = \"(accepted)\"\n            base, ext = splitext(abspath(md_file))\n            output = \"%s%s%s\" % (base, label, ext)\n    else:\n        output = None\n\n    if not is_stream and output == md_file:\n        output = None\n    return output\n\n\ndef get_base_path(md_file, basepath, is_stream):\n    \"\"\" Get the base path to use when resolving relative paths if possible \"\"\"\n\n    if basepath is not None and exists(basepath):\n        # A valid path was fed in\n        path = basepath\n        base_path = dirname(abspath(path)) if isfile(path) else abspath(path)\n    elif not is_stream:\n        # Use the current file path\n        base_path = dirname(abspath(md_file))\n    else:\n        # Okay, there is no way to tell the origin.\n        # We are probably a stream that has no specified\n        # physical location.\n        base_path = None\n    return base_path\n\n\ndef get_files(file_patterns):\n    \"\"\" Find and return files matching the given patterns \"\"\"\n\n    import glob\n    files = []\n    all_files = []\n    if len(file_patterns):\n        for pattern in file_patterns:\n            files += glob.glob(pattern)\n        for f in files:\n            all_files.append(abspath(normpath(f)))\n    return all_files\n\n\ndef get_file_stream(encoding):\n    \"\"\" Get the file stream \"\"\"\n\n    import fileinput\n    import traceback\n    sys.argv = []\n    text = []\n    try:\n        for line in fileinput.input():\n            text.append(line.decode(encoding))\n        stream = ''.join(text)\n    except:\n        Logger.log(traceback.format_exc())\n        stream = None\n    text = None\n    return stream\n\n\ndef auto_open(name):\n    \"\"\" Auto open HTML \"\"\"\n\n    # Maybe just use desktop\n    if _PLATFORM == \"osx\":\n        # TODO: parse plist for default browser\n        # Probably should parse com.apple.LaunchServices.plist for\n        # \n        # LSHandlerRoleAll\n        # com.google.chrome <--To get this\n        # LSHandlerURLScheme\n        # http <--Parse for this\n        # \n        subprocess.Popen(['open', name])\n    elif _PLATFORM == \"windows\":\n        webbrowser.open(name, new=2)\n    else:\n        try:\n            # Maybe...?\n            subprocess.Popen(['xdg-open', name])\n        except OSError:\n            webbrowser.open(name, new=2)\n            # Well we gave it our best...\n            
pass\n\n\ndef critic_dump(md_file, enc, out, stream, reject):\n \"\"\" Process the critic marks and dump the modified file \"\"\"\n\n status = 0\n text = None\n try:\n if not stream:\n with codecs.open(md_file, \"r\", encoding=enc) as f:\n text = CriticDump().dump(f.read(), not reject)\n else:\n text = CriticDump().dump(md_file, not reject)\n except:\n Logger.log(traceback.format_exc())\n status = 1\n if text is not None:\n if out is not None:\n try:\n if out is not None:\n with codecs.open(out, \"w\", encoding=enc) as f:\n f.write(text)\n except:\n status = 1\n print(text.encode(enc))\n else:\n print(text.encode(enc))\n return status\n\n\ndef html_dump(md_file, enc, out, stream, html_title, base_path, preview, settings):\n \"\"\" Dump HTML \"\"\"\n\n status = 0\n # Instantiate Mdown class\n mdown = (Mdowns if stream else Mdown)(\n md_file, enc,\n title=html_title, base_path=base_path, settings=settings\n )\n\n # If all went well, either preview the file,\n # or write it to file or terminal\n if mdown.error is None:\n if preview:\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=\".html\") as f:\n f.write(mdown.markdown)\n auto_open(f.name)\n except:\n Logger.log(traceback.format_exc())\n print(mdown.markdown)\n status = 1\n else:\n mdown.write(out)\n if status == 0 and mdown.error is not None:\n print(mdown.markdown)\n Logger.log(mdown.error)\n status = 1\n else:\n Logger.log(mdown.error)\n status = 1\n return status\n\n\ndef convert(\n markdown=[], title=None, encoding=\"utf-8\",\n output=None, basepath=None, preview=False,\n stream=False, terminal=False, quiet=False,\n text_buffer=None, critic_mode=CRITIC_IGNORE,\n reject=False, settings_path=None\n):\n \"\"\" Convert markdown file(s) to html \"\"\"\n status = 0\n Logger.quiet = quiet\n\n # Get file(s) or stream\n enc = encoding\n files = None\n\n if stream:\n files = [get_file_stream(enc)]\n if text_buffer is not None:\n stream = True\n files = [text_buffer]\n if files is None:\n files = get_files(markdown)\n\n # Make sure we have something we can process\n if files is None or len(files) == 0 or files[0] is None:\n Logger.log(\"No file to parse!\")\n status = 1\n\n if status == 0:\n count = 0\n for md_file in files:\n # Quit dumping if there was an error\n if status != 0:\n break\n\n if stream:\n Logger.log(\"Converting buffer...\")\n else:\n Logger.log(\"Converting %s...\" % md_file)\n\n # Get base path to use when resolving basepath paths\n base_path = get_base_path(md_file, basepath, stream)\n\n # Get output location\n out = get_output(md_file, count, output, terminal, stream, critic_mode, reject)\n\n # Get the title to be used in the HTML\n html_title = get_title(md_file, title, stream)\n\n # Get the settings if available\n settings = get_settings(settings_path, preview, critic_mode, reject)\n\n if critic_mode == CRITIC_DUMP:\n status = critic_dump(md_file, enc, out, stream, reject)\n else:\n status = html_dump(\n md_file, enc, out, stream, html_title,\n base_path, preview, settings\n )\n return status\n\n\nif __name__ == \"__main__\":\n import argparse\n\n def first_or_none(item):\n return item[0] if item is not None else None\n\n def main():\n \"\"\" Go! 
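 (editor's note) A minimal usage sketch, assuming only the convert()
 signature defined above; the file names are hypothetical:

 status = convert(markdown=['notes.md'], title='Notes',
 output='notes.html', critic_mode=CRITIC_IGNORE)
 sys.exit(status)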
\"\"\"\n\n parser = argparse.ArgumentParser(prog='mdown', description='Markdown generator')\n # Flag arguments\n parser.add_argument('--version', action='version', version=\"%(prog)s \" + __version__)\n parser.add_argument('--quiet', '-q', action='store_true', default=False, help=\"No messages on stdout.\")\n parser.add_argument('--preview', '-p', action='store_true', default=False, help=\"Output to preview (temp file). --output will be ignored.\")\n me_group = parser.add_mutually_exclusive_group()\n me_group.add_argument('--critic', '-c', action='store_true', default=False, help=\"Show critic marks in a viewable html output.\")\n me_group.add_argument('--critic-dump', '-C', action='store_true', default=False, help=\"Process critic marks, dumps file(s), and exits.\")\n parser.add_argument('--reject', '-r', action='store_true', default=False, help=\"Reject propossed critic marks when using in normal processing and --critic-dump processing\")\n parser.add_argument('--terminal', '-t', action='store_true', default=False, help=\"Print to terminal (stdout).\")\n parser.add_argument('--output', '-o', nargs=1, default=None, help=\"Output directory can be a directory or file_name. Use ${count} when exporting multiple files and using a file pattern.\")\n parser.add_argument('--stream', '-s', action='store_true', default=False, help=\"Streaming input. markdown file inputs will be ignored.\")\n parser.add_argument('--settings', '-S', nargs=1, default=None, help=\"Load the settings file from an alternate location.\")\n parser.add_argument('--title', '-T', nargs=1, default=None, help=\"Title for HTML.\")\n parser.add_argument('--encoding', '-e', nargs=1, default=[\"utf-8\"], help=\"Encoding for input.\")\n parser.add_argument('--basepath', '-b', nargs=1, default=None, help=\"The basepath location mdown should use.\")\n parser.add_argument('markdown', nargs='*', default=[], help=\"Markdown file(s) or file pattern(s).\")\n\n args = parser.parse_args()\n\n critic_mode = CRITIC_IGNORE\n if args.critic_dump:\n critic_mode = CRITIC_DUMP\n elif args.critic:\n critic_mode = CRITIC_VIEW\n\n return convert(\n encoding=args.encoding[0],\n basepath=first_or_none(args.basepath),\n terminal=args.terminal,\n critic_mode=critic_mode,\n reject=args.reject,\n stream=args.stream,\n title=first_or_none(args.title),\n quiet=args.quiet,\n preview=args.preview,\n output=first_or_none(args.output),\n settings_path=first_or_none(args.settings),\n markdown=args.markdown\n )\n\n script_path = dirname(abspath(sys.argv[0]))\n sys.exit(main())\nelse:\n script_path = dirname(abspath(__file__))\n", "sub_path": "__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 16549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.platform.startswith", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 33, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 62, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 74, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 74, "usage_type": "attribute"}, {"api_name": "re.VERBOSE", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 126, "usage_type": 
"call"}, {"api_name": "mdown.load_text_resource", "line_number": 127, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 129, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 132, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 139, "usage_type": "call"}, {"api_name": "file_strip.json.sanitize_json", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 265, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 293, "usage_type": "attribute"}, {"api_name": "fileinput.input", "line_number": 296, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 300, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 319, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 321, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 325, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 327, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 339, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 344, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 350, "usage_type": "call"}, {"api_name": "mdown.Mdowns", "line_number": 365, "usage_type": "name"}, {"api_name": "mdown.Mdown", "line_number": 365, "usage_type": "name"}, {"api_name": "mdown.error", "line_number": 372, "usage_type": "attribute"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 375, "usage_type": "call"}, {"api_name": "mdown.markdown", "line_number": 376, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 379, "usage_type": "call"}, {"api_name": "mdown.markdown", "line_number": 380, "usage_type": "attribute"}, {"api_name": "mdown.write", "line_number": 383, "usage_type": 
"call"}, {"api_name": "mdown.error", "line_number": 384, "usage_type": "attribute"}, {"api_name": "mdown.markdown", "line_number": 385, "usage_type": "attribute"}, {"api_name": "mdown.error", "line_number": 386, "usage_type": "attribute"}, {"api_name": "mdown.error", "line_number": 389, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 465, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 506, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 506, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 506, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 509, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 509, "usage_type": "call"}]} +{"seq_id": "619736191", "text": "import os, json, numpy as np\nos.environ[\"PYTHONIOENCODING\"] = \"utf-8\"\n\nIN_DIR = \"./data/web/data/\"\nFIELDS = [\n '#HOST_KEYWORDS_ANCHOR:', '#META_KEYWORDS:', \n # '#DIRECT_KEYWORDS', #* 값이 없음\n '#BREAD_CRUMB:', \n '#BREAD_CRUMB_PARENT:', \n '#DESC:', \n '#BODY_LARGER:', '#BODY_TOP:', '#BODY_TEXT:', \n '#NONBODY_TEXT:',\n '#HTML_H1:', '#HTML_H2:', '#HTML_H3:',\n '#HTML_TITLE:', '#HTML_TITLE_HEAD:', \n '#IN_DOMAIN_ANCHOR:', '#OUT_DOMAIN_ANCHOR:', \n '#FIRST_OUT_DOMAIN_TITLE:', '#ONLY_OUT_DOMAIN_TITLE:', '#OUT_DOMAIN_TITLE:', \n \n # '#SUBLINK_ALIAS:', \"#SUBLINK_TITLE:\", #* 값이 없음\n \n '#SITE_NAME:',\n \n #* precision, recall 이 너무 낮음\n # '#URL_PATH_ELEMENTS:', \n # '#URL_DOMAIN_ELEMENTS:', '#URL_PLD_ELEMENTS:', '#URL_QUERY_ELEMENTS:', '#URL_SUBDOMAIN_ELEMENTS:', \n\n # '#QUERY_TEXT:', '#URL:', '#CLICK_COUNT',\n]\n# * seq (a sequence of words), \n# * multi-seq (sequences of words), \n# * list (words without sequential order)\nFIELD_TO_type = {\n \"#HOST_KEYWORDS_ANCHOR:\":\"multi-seq\",\n \"#META_KEYWORDS:\":\"list\",\n \"#BREAD_CRUMB:\":\"seq\",\n \"#BREAD_CRUMB_PARENT:\":\"seq\",\n \"#DESC:\":\"seq\",\n \"#BODY_LARGER:\": \"seq\",\n \"#BODY_TOP:\":\"seq\",\n \"#BODY_TEXT:\":\"seq\",\n \"#NONBODY_TEXT:\":\"seq\",\n \"#HTML_H1:\":\"seq\",\n \"#HTML_H2:\":\"seq\",\n \"#HTML_H3:\":\"seq\",\n \"#HTML_TITLE:\":\"seq\",\n \"#HTML_TITLE_HEAD:\":\"list\",\n \"#IN_DOMAIN_ANCHOR:\":\"multi-seq\",\n \"#OUT_DOMAIN_ANCHOR:\":\"multi-seq\",\n \"#FIRST_OUT_DOMAIN_TITLE:\":\"multi-seq\",\n \"#ONLY_OUT_DOMAIN_TITLE:\":\"multi-seq\",\n \"#OUT_DOMAIN_TITLE:\":\"multi-seq\",\n \"#SITE_NAME:\":\"seq\",\n}\ndef preprocess(v):\n if \"\\n\" in v:\n v = v.split(\"\\n\")\n v = [_v if \"\\t\" not in _v else _v.split(\"\\t\")[1] for _v in v]\n v = \"\\n\".join(\"\\n\")\n return v if v.strip() else None\n else:\n v = v if \"\\t\" not in v else v.split(\"\\t\")[1]\n return v if v.strip() else None\nin_path_ls = [IN_DIR + in_path for in_path in sorted(os.listdir(IN_DIR))]\ncurrent_file_path = os.path.abspath(__file__)\ndirname = os.path.dirname(current_file_path)+\"/data/web\"\nsrc_json_data = []\ntgt_json_data = []\nfor in_path in in_path_ls:\n with open(in_path, \"r\", encoding=\"utf8\") as fp_read:\n data = fp_read.readlines()\n for line in data:\n url, gdid, raw_content = line.strip().split(\"\\t\")\n content_dict = json.loads(raw_content)\n src_dict = {\n k:preprocess(v)\n for k, v in content_dict.items()\n if k in FIELDS\n }\n tgt = content_dict[\"present_click_query_list\"]\\\n if \"present_click_query_list\" in content_dict\\\n else content_dict[\"click_query_list\"]\n tgt = [dat.split(\"\\t\")[1] for dat in tgt.split(\"|\")]\n src_json = 
json.dumps(src_dict)\n tgt_json = json.dumps(tgt)\n src_json_data.append(src_json+\"\\n\")\n tgt_json_data.append(tgt_json+\"\\n\")\n\nshuffle_index = np.random.permutation(len(src_json_data))\nn_valid, n_test = 1000, 1000\nn_train = len(shuffle_index) -n_valid -n_test\nidx_train = shuffle_index[:n_train]\nidx_valid = shuffle_index[n_train:n_train+n_valid]\nidx_test = shuffle_index[n_train+n_valid:]\nfor data_type, idx_ls in [\n (\"valid\", idx_valid),\n (\"test\", idx_test),\n (\"train\", idx_train)\n]:\n src_path = \"{}/src-{}.txt\".format(dirname, data_type)\n tgt_path = \"{}/tgt-{}.txt\".format(dirname, data_type)\n with open(src_path, \"w\", encoding=\"utf8\") as src_write\\\n ,open(tgt_path, \"w\", encoding=\"utf8\") as tgt_write :\n for idx in idx_ls:\n src_write.write(src_json_data[idx])\n tgt_write.write(tgt_json_data[idx])\n\"\"\"\nfor in_path in in_path_ls:\n basename = os.path.basename(in_path)\n src_out_path = \"{}/{}-src.txt\".format(dirname, basename)\n tgt_out_path = \"{}/{}-tgt.txt\".format(dirname, basename)\n with open(in_path, \"r\", encoding=\"utf8\") as fp_read, \\\n open(src_out_path, \"w\", encoding=\"utf8\") as fp_write_src,\\\n open(tgt_out_path, \"w\", encoding=\"utf8\") as fp_write_tgt:\n data = fp_read.readlines()\n for line in data:\n url, gdid, raw_content = line.strip().split(\"\\t\")\n content_dict = json.loads(raw_content)\n src_dict = {\n k:preprocess(v)\n for k, v in content_dict.items()\n if k in FIELDS\n }\n tgt = content_dict[\"present_click_query_list\"]\\\n if \"present_click_query_list\" in content_dict\\\n else content_dict[\"click_query_list\"]\n tgt = [dat.split(\"\\t\")[1] for dat in tgt.split(\"|\")]\n src_json = json.dumps(src_dict)\n tgt_json = json.dumps(tgt)\n fp_write_src.write(src_json+\"\\n\")\n fp_write_tgt.write(tgt_json+\"\\n\")\n\"\"\" \n", "sub_path": "construct_web_data.py", "file_name": "construct_web_data.py", "file_ext": "py", "file_size_in_byte": 4847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "126557904", "text": "# coding=utf-8\n\n\nimport pygame\n\nfrom shittychess_settings import ShittySettings\nfrom shittychess_events import ShittyEventMonitor\nfrom shittychess_board import ShittyBoard\nfrom shittychess_logic import ShittyLogic\nfrom shittychess_layout import ShittyLayout\n\n\nclass ShittyChess:\n\n def __init__(self) -> None:\n pygame.init()\n pygame.display.set_caption(\"Shitty Chess\")\n self.settings = ShittySettings()\n self.screen = pygame.display.set_mode((self.settings.screen_width(), self.settings.screen_height()))\n self.board = ShittyBoard(self.screen, self.settings)\n self.logic = ShittyLogic(self.settings)\n self.layout = ShittyLayout(self.screen, 
self.settings, self.logic)\n self.event_monitor = ShittyEventMonitor(self.screen, self.settings, self.layout)\n self.exiting = False\n\n\n def run_game(self) -> None:\n \"\"\"This is the main function of the program which runs the code.\"\"\"\n\n self.main_loop()\n\n\n def main_loop(self) -> None:\n while not self.exiting:\n self.event_monitor.process_events()\n if self.settings.headers_enabled:\n self.screen.fill(self.settings.header_background_color)\n self.board.draw()\n self.layout.draw()\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n chess = ShittyChess()\n chess.run_game()\n", "sub_path": "shittychess.py", "file_name": "shittychess.py", "file_ext": "py", "file_size_in_byte": 1377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "shittychess_settings.ShittySettings", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "shittychess_board.ShittyBoard", "line_number": 20, "usage_type": "call"}, {"api_name": "shittychess_logic.ShittyLogic", "line_number": 21, "usage_type": "call"}, {"api_name": "shittychess_layout.ShittyLayout", "line_number": 22, "usage_type": "call"}, {"api_name": "shittychess_events.ShittyEventMonitor", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "14952977", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 22 09:23:52 2017\n\n@author: cristian bonato\n\"\"\"\n\nimport pylab as pl\nimport time\nfrom tools import data_object as DO\nimport numpy as np\nfrom measurements.libs.mapper_scanners import move_smooth\n\n\nclass XYScan ():\n def __init__(self, scanner_axes=None, detectors=None):\n self._scanner_axes = scanner_axes\n self._detectors = detectors\n\n self.delayBetweenPoints = 1\n self.delayBetweenRows = 0.5\n self.trigger_active = True\n self.feedback_active = True\n\n self._back_to_zero = False\n\n # determine the longest test delay in the detectors\n if self._detectors is not None:\n self.max_delay_state_check = max([detector.delay_state_check for detector in self._detectors])\n self.max_delay_after_readout = max([detector.delay_after_readout for detector in self._detectors])\n else:\n self.max_delay_state_check = 0\n self.max_delay_after_readout = 0\n\n def set_delays(self, between_points, between_rows):\n self.delayBetweenPoints = between_points\n self.delayBetweenRows = between_rows\n\n def set_back_to_zero(self):\n # to go back to 0 V at the end of a scan\n self._back_to_zero = True\n\n def set_range(self, xLims, xStep, yLims=None, yStep=None):\n\n self.xNbOfSteps = int(abs(pl.floor((float(xLims[1]) - float(xLims[0])) / float(xStep))) + 1)\n self.xPositions = pl.linspace(xLims[0], xLims[1], self.xNbOfSteps)\n self.xStep = xStep\n if yLims is not None or yStep is not None:\n self.yNbOfSteps = int(abs(pl.floor((float(yLims[1]) - float(yLims[0])) / float(yStep))) + 1)\n self.yPositions = pl.linspace(yLims[0], yLims[1], self.yNbOfSteps)\n self.yStep = yStep\n else:\n self.yNbOfSteps = 1\n self.yPositions = 
pl.array([0])\n self.yStep = 0\n self.totalNbOfSteps = self.xNbOfSteps * self.yNbOfSteps\n\n if self._detectors is None:\n self.counts = None\n else:\n self.counts = [pl.zeros([self.xNbOfSteps, self.yNbOfSteps]) for detector in self._detectors]\n # print(self.counts)\n\n def set_trigger(self, trigger=True, feedback=True):\n self.trigger_active = trigger\n self.feedback_active = feedback\n\n def seconds_in_HMS(self, nbOfSeconds):\n hours = pl.floor(nbOfSeconds / 3600)\n minutes = pl.floor(nbOfSeconds % 3600 / 60)\n seconds = nbOfSeconds - minutes*60 - hours*3600\n return hours, minutes, seconds\n\n def print_elapsed_time(self, start_time, current_index, total_nb_of_steps):\n elapsedTime = float(time.time() - start_time)\n remainingTime = elapsedTime / current_index * (total_nb_of_steps - (current_index-1))\n \n hoursE, minutesE, secondsE = self.seconds_in_HMS(elapsedTime)\n hoursR, minutesR, secondsR = self.seconds_in_HMS(remainingTime)\n\n print('Elapsed time: {:.0f} h {:.0f} min {:.0f} s\\tRemaining time: {:.0f} h {:.0f} min {:.0f} s'.format(hoursE, minutesE, secondsE, hoursR, minutesR, secondsR))\n\n def wait_first_point(self, detectors):\n if detectors is not None:\n while not all([detector.first_point() for detector in detectors]):\n time.sleep(self.max_delay_state_check)\n\n def wait_for_ready(self, detectors):\n if detectors is not None:\n while not all([detector.is_ready() for detector in detectors]):\n time.sleep(self.max_delay_state_check)\n\n def init_detectors(self, detectors):\n if detectors is not None:\n for detector in detectors:\n detector.initialize()\n\n def init_scanners(self, scanner_axes):\n if scanner_axes is not None:\n for scanner_axis in scanner_axes:\n scanner_axis.initialize()\n\n def run_scan(self, close_instruments=True, silence_errors=True):\n try:\n self.init_detectors(self._detectors)\n self.init_scanners(self._scanner_axes)\n\n print('Total number of steps: {}\\n'.format(self.totalNbOfSteps) +\n 'X number of steps: {}\\n'.format(self.xNbOfSteps) +\n 'Y number of steps: {}'.format(self.yNbOfSteps))\n\n start_time = 0\n first_point = True\n idx = 0\n\n #self._scanner_axes[0].move_smooth(self.xPositions[0])\n #self._scanner_axes[1].move_smooth(self.yPositions[0])\n move_smooth(self._scanner_axes, targets=[self.xPositions[0], self.yPositions[0]])\n\n print('\\nScanners are at start position. Waiting for acquisition.\\n')\n\n print('step \\tx (V)\\ty (V)')\n\n for id_y, y in enumerate(self.yPositions):\n firstInRow = True\n \n #print (\"y = \", y)\n\n for id_x, x in enumerate(self.xPositions):\n idx += 1\n \n #print (\"x = \", x)\n\n self._scanner_axes[0].move(x)\n try:\n self._scanner_axes[1].move(y)\n except IndexError:\n pass\n\n # display update\n print('{}/{} \\t{:.1f} \\t{:.1f}'.format(idx, self.totalNbOfSteps, x, y))\n\n # For first point may wait for a reaction \n # (when acquisition is launched in WinSpec and the old Acton spectrometers)\n if first_point:\n self.wait_first_point(self._detectors)\n start_time = time.time()\n first_point = False\n else:\n if idx % 10 == 0:\n self.print_elapsed_time(start_time=start_time, current_index=idx, total_nb_of_steps=self.totalNbOfSteps)\n\n # delay between rows\n if firstInRow:\n time.sleep(self.delayBetweenRows)\n firstInRow = False\n\n # delay between points\n time.sleep(self.delayBetweenPoints)\n\n # trigger exposure / detector measurement\n if self._detectors is not None:\n for counts, detector in zip(self.counts, self._detectors):\n counts[id_x, id_y] = detector.readout() # POSSIBLE BLOCKING BEHAVIOUR HERE! 
put non blocking (spectros...) before blocking (apds...) in the detectors list\n\n time.sleep(self.max_delay_after_readout) # some old devices will not react immediately to say they are integrating\n\n # wait for detector to say it finished\n self.wait_for_ready(self._detectors)\n\n # move back to first point of row smoothly\n if y != self.yPositions[-1]:\n self._scanner_axes[0].move_smooth(target=self.xPositions[0])\n\n # go smoothly to start position\n if self._back_to_zero:\n print('\\nGoing back to 0 V on scanners...')\n\n #self._scanner_axes[0].move_smooth(0)\n #self._scanner_axes[1].move_smooth(0)\n \n move_smooth(self._scanner_axes, targets=[0, 0])\n\n #move_smooth(self._scanner_axes, targets=[self.xPositions[0], self.yPositions[0]])\n\n print('\\nSCAN COMPLETED\\n' +\n 'X from {:.2f} V to {:.2f} V with step size {:.2f} V (nb of steps: {})\\n'.format(self.xPositions[0], self.xPositions[-1], self.xStep, self.xNbOfSteps) +\n 'Y from {:.2f} V to {:.2f} V with step size {:.2f} V (nb of steps: {})\\n'.format(self.yPositions[0], self.yPositions[-1], self.yStep, self.yNbOfSteps) +\n 'Total number of steps: {}'.format(self.totalNbOfSteps))\n\n except KeyboardInterrupt:\n print('\\n#### Program interrupted by user. ####')\n close_instruments = True\n except:\n close_instruments = True\n if not silence_errors:\n raise\n finally:\n if close_instruments:\n self.close_instruments()\n\n\n def close_instruments(self):\n for scanner in self._scanner_axes:\n scanner.close()\n if self._detectors is not None:\n for detector in self._detectors:\n detector.close()\n\n\n\n def save_to_hdf5(self, file_name=None):\n\n d_obj = DO.DataObjectHDF5()\n d_obj.save_object_to_file (self, file_name)\n print(\"File saved\")\n\n def save_to_npz(self, file_name):\n np.savez(file_name+'.npz', self.counts)\n\n def plot_counts(self):\n\n if (self.detector_type == 'apd'):\n pl.figure(figsize=(10, 10))\n pl.pcolor(self.counts[0])\n pl.show()\n else:\n print(\"No counts available.. 
use APD\")\n\n\n def save_to_txt(self, file_name, array=None, flatten=True):\n if array is None:\n array = self.counts\n if flatten:\n pl.savetxt(file_name, np.array(array).flatten().transpose())\n else:\n pl.savetxt(file_name, array)\n print(\"\\nPower as volts saved in file.\")\n", "sub_path": "libs/__deprecated/mapper_old.py", "file_name": "mapper_old.py", "file_ext": "py", "file_size_in_byte": 9256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pylab.floor", "line_number": 45, "usage_type": "call"}, {"api_name": "pylab.linspace", "line_number": 46, "usage_type": "call"}, {"api_name": "pylab.floor", "line_number": 49, "usage_type": "call"}, {"api_name": "pylab.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "pylab.array", "line_number": 54, "usage_type": "call"}, {"api_name": "pylab.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "pylab.floor", "line_number": 69, "usage_type": "call"}, {"api_name": "pylab.floor", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "measurements.libs.mapper_scanners.move_smooth", "line_number": 118, "usage_type": "call"}, {"api_name": "time.time", "line_number": 147, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 155, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 166, "usage_type": "call"}, {"api_name": "measurements.libs.mapper_scanners.move_smooth", "line_number": 182, "usage_type": "call"}, {"api_name": "tools.data_object.DataObjectHDF5", "line_number": 214, "usage_type": "call"}, {"api_name": "tools.data_object", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 219, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 224, "usage_type": "call"}, {"api_name": "pylab.pcolor", "line_number": 225, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 226, "usage_type": "call"}, {"api_name": "pylab.savetxt", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 235, "usage_type": "call"}, {"api_name": "pylab.savetxt", "line_number": 237, "usage_type": "call"}]} +{"seq_id": "46304320", "text": "\nfrom gensim import similarities\nimport os, logging\nimport numpy as np\nimport pandas as pd\n\nclass NeighborSampler():\n def __init__(self, chip_name, stage = 0, x_data=None):\n self.index = None\n self.chip_name = chip_name\n self.stage = stage\n self.build(x_data)\n\n def build(self, x_data=np.array([])):\n corpus = []\n for i in range(x_data.shape[0]):\n gensim_format_vec = []\n for j in range(x_data.shape[1]):\n gensim_format_vec.append((j, x_data[i][j]))\n corpus.append(gensim_format_vec)\n logging.info(\"#sample to build index: %s\" % x_data.shape[0])\n self.get_index(corpus,x_data.shape[1])\n\n def get_index(self, corpus=None, n_feature=None):\n self.index = similarities.Similarity(\"%s_%s_neighbor.index.tmp\" % (self.chip_name, self.stage), corpus, num_features=n_feature)\n return self.index\n\n def get_topk(self, vec, k):\n gensim_format_vec = []\n for i in range(len(vec)):\n gensim_format_vec.append((i, vec[i]))\n sims = self.index[gensim_format_vec]\n\n sim_sort = sorted(list(enumerate(sims)), key=lambda item: item[1])\n top_k = 
sim_sort[0:k]\n return top_k\n\ndef build_sampled_coexpression_matrix(x_data, filename, k = 1000):\n logging.info(\"Co-expression matrix building process starts.\")\n logging.info(\"data shape: %s %s\" % (x_data.shape[0], x_data.shape[1]))\n n_gene = x_data.shape[1]\n\n neighbor_sampler = NeighborSampler(filename, x_data = x_data)\n with open(\"matrix.txt\", \"w\") as f:\n for i in range(n_gene):\n list_neighbor = neighbor_sampler.get_topk(x_data[i], k)\n list_neighbor.extend(neighbor_sampler.get_topk(x_data[i], k))\n list_neighbor = list(set(list_neighbor))\n logging.info(\"sample %s's topk neighbor: %s\" % (i, list_neighbor))\n for j, value in list_neighbor:\n pearson_value = pearson(x_data[i], x_data[j])\n f.write(\"%s\\t%s\\t%s\\n\" % (i, j, pearson_value))\n f.flush()\n os.fsync(f.fileno())\n\n\ndef pearson(X, Y):\n return np.corrcoef(X, Y)[0][1]\n\nif __name__ == \"__main__\":\n x_data = pd.read_csv(\"F:\\gene2\\spartan2\\live-tutorials\\inputData\\example.csv\", sep=\",\", header=None)\n build_sampled_coexpression_matrix(x_data, \"out\")", "sub_path": "spartan/util/geneutil.py", "file_name": "geneutil.py", "file_ext": "py", "file_size_in_byte": 2348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "gensim.similarities.Similarity", "line_number": 25, "usage_type": "call"}, {"api_name": "gensim.similarities", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 49, "usage_type": "call"}, {"api_name": "os.fsync", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "73577590", "text": "from utils import head\n\nclass Person:\n\t'This is Class1'\n\n\tcount = 0\n\n\tdef __init__(self, first, last):\n\t\tself.first = first\n\t\tself.last = last\n\t\tPerson.count += 1\n\n\tdef get_name(self):\n\t\treturn self.first + \" \" + self.last\n\n\tdef __del__(self):\n\t\tPerson.count-=1\n\n# instantiating classes\nhead(\"Creating Person\")\nperson = Person(\"Siddhesh\", \"Prabhu\")\nprint(person.get_name())\n# is equivalent to\nprint(Person.get_name(person))\n\n# use of class variables\nhead(\"Creating 4 additional persons\")\npersons = []\nfor i in range(1,5):\n\tpersons.append(Person(\"First\", \"Last\"))\n\nprint(\"Total persons =\", Person.count)\n\n# accessing attributes\nhead(\"Checking first name\")\nprint(hasattr(person, \"first\"))\n\nhead(\"Getting first name\")\nprint(getattr(person, \"first\"))\n\nhead(\"Setting first name\")\nsetattr(person, \"first\", \"Siddhu\")\nprint(person.get_name())\n\n# delattr(obj, name)\n\n# built in class attributes\nhead(\"Built-in class attributes\")\nprint(\"Namespace =\", Person.__dict__)\nprint(\"Documentation =\", Person.__doc__)\nprint(\"Name =\", Person.__name__)\nprint(\"Module =\", Person.__module__)\nprint(\"Bases =\", Person.__bases__)\n\n# destructor\nhead(\"Destructor\")\npersons.clear()\nprint(Person.count)\n\n# inheritance\nclass Employee(Person):\n\t\n\tdef __init__(self, first, last, salary):\n\t\tsuper().__init__(first, last)\n\t\tself.salary = salary\n\nhead(\"Inheritance\")\nemp = Employee(\"Sachin\", 
\"Tendulkar\", 100)\n# functions can be saved\nf = emp.get_name\nprint(f())\nprint(emp.salary)\n\n# multiple inheritance\nclass A:\n\tdef f(self):\n\t\tprint(\"A\")\n\nclass B:\n\tdef f(self):\n\t\tprint(\"B\")\n\nclass C(A,B):\n\tdef g(self):\n\t\tprint(\"C\")\n\nclass D(B,A):\n\tdef g(self):\n\t\tprint(\"D\")\n\n# conflicts are handled in order of declaration from left to right with DFS within each\nhead(\"Multiple Inheritance\")\nC().f()\nD().f()\n\nhead(\"isinstance & issubclass\")\no = C()\nprint(isinstance(o, C))\nprint(isinstance(o, D))\nprint(issubclass(C, A))\nprint(issubclass(C, D))\n\n# mangling - https://docs.python.org/3/tutorial/classes.html#private-variables\nclass A:\n\tdef __f(self):\n\t\tprint(\"__f\")\n\na = A()\ntry:\n\ta.__f()\nexcept AttributeError as e:\n\tprint(e)\n\n# Dynamic members\nclass A:\n\tpass\n\nhead(\"Dynamic attributes\")\na = A()\na.m1 = 1\na.m2 = 2\nprint(a.m1,a.m2)\n\nclass FunctionCopy(B,A):\n\tdef g(self):\n\t\tprint(\"D\")\n\tdef copy(self):\n\t\treturn self.g\n\n# You can return functions\nhead(\"Return Function\")\nFunctionCopy().copy()()", "sub_path": "oops.py", "file_name": "oops.py", "file_ext": "py", "file_size_in_byte": 2346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.head", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.head", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "161859268", "text": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import CommandNotFound\nimport random\nimport post_create\nimport sql_handlers\n\n\n\nprefix = '!'\npika_bot = commands.Bot(command_prefix=prefix)\npika_bot.remove_command('help')\n\n@pika_bot.command()\nasync def help(ctx):\n emb = discord.Embed(title='Commands', colour=random.randint(0, 0xFFFF))\n emb.add_field(name='Use this for get a meme', value=\"{}pika\".format(prefix))\n await ctx.send(embed=emb)\n\n@pika_bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, CommandNotFound):\n await ctx.send('Oops... You entered the wrong command. 
Use !help for help.')\n\n@pika_bot.command()\nasync def pika(ctx):\n fct_counts = sql_handlers.fct_counts()\n if fct_counts == 0:\n await ctx.send('Refreshing the memes, please wait!')\n sql_handlers.truncate_fct_parsed_post()\n sql_handlers.truncate_fct_used_article()\n sql_handlers.stg_to_fct_replication()\n post_raw = post_create.create_post()\n sql_handlers.article_id_insert(post_raw)\n emb = discord.Embed(colour=random.randint(0, 0xFFFF))\n emb.set_image(url=post_raw[4])\n emb.set_author(name=post_raw[1], url=post_raw[2])\n emb.set_footer(text=post_raw[3], icon_url='http://pngimg.com/uploads/fallout/fallout_PNG64.png')\n emb.add_field(name='Author', value=post_raw[5])\n emb.add_field(name='Comments', value=post_raw[6], inline=True)\n await ctx.send(embed=emb)\n else:\n post_raw = post_create.create_post()\n sql_handlers.article_id_insert(post_raw)\n emb = discord.Embed(colour=random.randint(0, 0xFFFF))\n emb.set_image(url=post_raw[4])\n emb.set_author(name=post_raw[1], url=post_raw[2])\n emb.set_footer(text=post_raw[3], icon_url='http://pngimg.com/uploads/fallout/fallout_PNG64.png')\n emb.add_field(name='Author', value=post_raw[5])\n emb.add_field(name='Comments', value=post_raw[6], inline=True)\n await ctx.send(embed=emb)\n\n\npika_bot.run(open('token.txt', 'r').readline())\n", "sub_path": "discord_pika_bot.py", "file_name": "discord_pika_bot.py", "file_ext": "py", "file_size_in_byte": 2122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 16, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 22, "usage_type": "argument"}, {"api_name": "sql_handlers.fct_counts", "line_number": 27, "usage_type": "call"}, {"api_name": "sql_handlers.truncate_fct_parsed_post", "line_number": 30, "usage_type": "call"}, {"api_name": "sql_handlers.truncate_fct_used_article", "line_number": 31, "usage_type": "call"}, {"api_name": "sql_handlers.stg_to_fct_replication", "line_number": 32, "usage_type": "call"}, {"api_name": "post_create.create_post", "line_number": 33, "usage_type": "call"}, {"api_name": "sql_handlers.article_id_insert", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "post_create.create_post", "line_number": 43, "usage_type": "call"}, {"api_name": "sql_handlers.article_id_insert", "line_number": 44, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 45, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "653890339", "text": "from django import template\nfrom pages.models import Pages\nfrom menu.models import Menu\nregister = template.Library()\n\n\n@register.inclusion_tag('mainmenu.html')\ndef mainmenu(menu_select, home=False):\n\tmenu = Menu.objects.filter(menu_select=menu_select).order_by('order')\n\t# print menu\n\tmenu_pages = [item.name.id for item in menu]\n\t# print menu_pages\n\tpages = Pages.objects.filter(id__in=menu_pages)\n\t# print pages\n\t# data['pages'] = pages\n\tdata = {'pages': pages}\n\tdata['home_show'] = home\n\treturn data\n", "sub_path": "pages/templatetags/mainmenu.py", 
"file_name": "mainmenu.py", "file_ext": "py", "file_size_in_byte": 504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}, {"api_name": "menu.models", "line_number": 9, "usage_type": "name"}, {"api_name": "menu.models.Menu.objects.filter", "line_number": 9, "usage_type": "call"}, {"api_name": "menu.models.Menu.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "menu.models.Menu", "line_number": 9, "usage_type": "name"}, {"api_name": "menu.models", "line_number": 11, "usage_type": "name"}, {"api_name": "pages.models", "line_number": 13, "usage_type": "name"}, {"api_name": "pages.models.Pages.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "pages.models.Pages.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pages.models.Pages", "line_number": 13, "usage_type": "name"}, {"api_name": "pages.models", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "219201868", "text": "# Tester file for the aerotbx.utils python files.\n# Simply calls the functions and checks that they give the correct output\n# @author = Matt (matthew@andreini.us)\n\nimport aerotbx.utils\nimport scipy as sp\n\n# int test\ninteg = 2\ninteg_array = aerotbx.utils.to_ndarray(integ)\nbinteg = aerotbx.utils.from_ndarray(*integ_array)\n\n# float test\nflt = 5.0\nflt_array = aerotbx.utils.to_ndarray(flt)\nbflt = aerotbx.utils.from_ndarray(*flt_array)\n\n# list test\nlst = [1, 4, 5]\nlst_array = aerotbx.utils.to_ndarray(lst)\nblst = aerotbx.utils.from_ndarray(*lst_array)\n\n# tuple test\ntup = (9,8,7)\ntup_array = aerotbx.utils.to_ndarray(tup)\nbtup = aerotbx.utils.from_ndarray(*tup_array)\n\n# array test\narr = sp.array([2,4])\narr_array = aerotbx.utils.to_ndarray(arr)\nbarr = aerotbx.utils.from_ndarray(*arr_array)\n\nassert binteg==integ, \"function 'from_ndarray' failed on %s\" % type(integ)\nassert bflt==flt, \"function 'from_ndarray' failed on %s\" % type(flt)\nassert blst==lst, \"function 'from_ndarray' failed on %s\" % type(lst)\nassert btup==tup, \"function 'from_ndarray' failed on %s\" % type(tup)\nassert sp.array_equal(arr, barr), \"function 'from_ndarray' failed on %s\" % type(arr)\n\nprint(\"All tests pass.\") # passes\n\n\n\n", "sub_path": "tests/utils_tests.py", "file_name": "utils_tests.py", "file_ext": "py", "file_size_in_byte": 1198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "aerotbx.utils.utils.to_ndarray", "line_number": 10, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 10, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 10, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.from_ndarray", "line_number": 11, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 11, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 11, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.to_ndarray", "line_number": 15, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 15, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 15, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.from_ndarray", "line_number": 16, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 16, "usage_type": "attribute"}, 
{"api_name": "aerotbx.utils", "line_number": 16, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.to_ndarray", "line_number": 20, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 20, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 20, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.from_ndarray", "line_number": 21, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 21, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.to_ndarray", "line_number": 25, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 25, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 25, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.from_ndarray", "line_number": 26, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 26, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "scipy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils.to_ndarray", "line_number": 30, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 30, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "aerotbx.utils.utils.from_ndarray", "line_number": 31, "usage_type": "call"}, {"api_name": "aerotbx.utils.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "aerotbx.utils", "line_number": 31, "usage_type": "name"}, {"api_name": "scipy.array_equal", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "553564699", "text": "import urlparse\n\nfrom google.appengine.ext import db\n\nfrom model.enum import Enum\nfrom model.song import Song\n\nfrom reptest.testcase import TestCase\n\nclass Link(db.Model):\n Type = Enum(Video='Video',\n Lesson='Lesson',\n Sheet='Sheet',\n Tab='Tab',\n Audio='Audio')\n\n type = db.StringProperty()\n url = db.LinkProperty()\n\n created = db.DateTimeProperty(auto_now_add=True)\n\n song = db.ReferenceProperty(Song,\n collection_name='links')\n \n def shortUrl(self):\n return str.join('.', urlparse.urlparse(self.url).netloc.split('.')[-2:])\n\nclass LinkTest(TestCase):\n def test(self):\n song = Song(name='Dust In The Wind',\n artist='Kansas')\n song.put()\n\n link1 = Link(url='http://youtube.com/watch?v=tH2w6Oxx0kQ',\n type=Link.Type.Video,\n song=song)\n link1.put()\n\n link2 = Link(url='http://tabs.ultimate-guitar.com/k/kansas/dust_in_the_wind_ver4_tab.htm',\n type=Link.Type.Tab,\n song=song)\n link2.put()\n\n self.assertEqual(2, song.links.count())\n\nif __name__ == '__main__':\n TestCase.main()\n", "sub_path": "model/link.py", "file_name": "link.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "google.appengine.ext.db.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db", "line_number": 10, "usage_type": "name"}, {"api_name": "model.enum.Enum", "line_number": 11, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.StringProperty", "line_number": 17, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 17, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.LinkProperty", "line_number": 18, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 18, "usage_type": 
"name"}, {"api_name": "google.appengine.ext.db.DateTimeProperty", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 20, "usage_type": "name"}, {"api_name": "google.appengine.ext.db.ReferenceProperty", "line_number": 22, "usage_type": "call"}, {"api_name": "model.song.Song", "line_number": 22, "usage_type": "argument"}, {"api_name": "google.appengine.ext.db", "line_number": 22, "usage_type": "name"}, {"api_name": "urlparse.urlparse", "line_number": 26, "usage_type": "call"}, {"api_name": "reptest.testcase.TestCase", "line_number": 28, "usage_type": "name"}, {"api_name": "model.song.Song", "line_number": 30, "usage_type": "call"}, {"api_name": "reptest.testcase.TestCase.main", "line_number": 47, "usage_type": "call"}, {"api_name": "reptest.testcase.TestCase", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "358664448", "text": "from django.test import TestCase\nfrom django.utils.text import slugify\n\nfrom .factories import PostFactory, TagFactory\n\n\nclass PostModelTests(TestCase):\n\n def test_updating_post_title_updates_post_slug_accordingly(self):\n post = PostFactory(title=u'old title')\n post.title = u'new title'\n post.save()\n self.assertEqual(post.slug, slugify(u'new title'))\n\n\nclass TagModelTests(TestCase):\n\n def test_updating_tag_name_updates_tag_slug_accordingly(self):\n tag = TagFactory(name=u'Larry Wachowski')\n tag.name = u'Lana Wachowski'\n tag.save()\n self.assertEqual(tag.slug, slugify(u'Lana Wachowski'))\n\n def test_updating_tag_capitalization_leaves_slug_as_is(self):\n tag = TagFactory(name=u'laura marling')\n tag.name = u'Laura Marling'\n tag.save()\n self.assertEqual(tag.slug, slugify(u'laura marling'))\n", "sub_path": "blog/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "factories.PostFactory", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 13, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 16, "usage_type": "name"}, {"api_name": "factories.TagFactory", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 22, "usage_type": "call"}, {"api_name": "factories.TagFactory", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "475914915", "text": "#!/usr/bin/env python3\nimport numpy as np\nfrom numpy import linalg as la\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib\nimport math\n\n# def get_fig():\n# return plt.figure()\n\n# def get_ax(fig, rows=1, cols=1, index=1)\n# return fig.add_subplot(int(str(rows)+str(cols)+str(index)))\n\ndef side_by_side(title=None):\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n if title is not None: fig.suptitle(title)\n return ax1,ax2\n\ndef not_side_by_side(title=None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if title is not None: fig.suptitle(title)\n return ax,fig\n\n\ndef simple_plot(ax, ydata, xdata=None, xlabel=None, ylabel=None, title=None):\n xdata = xdata or range(len(ydata))\n line, = ax.plot(xdata, ydata)\n if xlabel is not None: ax.set_xlabel(xlabel)\n if ylabel is not None: ax.set_ylabel(ylabel)\n if title is not None: 
ax.set_title(title)\n\n\n\ndef plot_spectrum(ax, spectrum_list, means, f0, fpb, xlabel=\"Frequency (Hz)\", ylabel=\"Sound Pressure\", title=None):\n left, right = max([*means,0]), min([*means,0])\n xmin, xmax = f0-left-7*fpb, f0-right+7*fpb\n min_bin, max_bin = round(int(xmin)/fpb), round(int(xmax)/fpb)\n xdata = np.array(range(min_bin, max_bin))*fpb#-f0+200\n lines = []\n for i,s in enumerate(spectrum_list):\n line, = ax.plot(xdata,np.array(s[min_bin:max_bin]), label=\"$S_{}$\".format(i))\n lines.append(line)\n offset = max([j for i in spectrum_list for j in i])*0.01\n f0_amplitude = max([spectrum[round(f0/fpb)] for spectrum in spectrum_list])\n ax.annotate('$f_0$', xy=(f0, f0_amplitude), xytext=(f0, f0_amplitude+offset), ha=\"center\")\n for i,(s,x) in enumerate(zip(spectrum_list,means)):\n x = f0-x\n index = round(int(x)/fpb)\n ax.annotate('$S_{}$'.format(i), xy=(x, s[index]), xytext=(x, s[index]+offset), ha='center')\n ax.legend()\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel) \n if title is not None: ax.set_title(title)\n\ndef maintain_bounds(xmin,xmax,ymin,ymax,x,y,u,v):\n xmin = x-u - 0.25 if x-u - 0.25 < xmin else xmin\n xmax = x+u + 0.25 if x+u + 0.25 > xmax else xmax\n ymin = y-v - 0.25 if y-v - 0.25 < ymin else ymin\n ymax = y+v + 0.25 if y+v + 0.25 > ymax else ymax\n return xmin,xmax,ymin,ymax\n\ndef plot_layout(ax, sensors, q=None, v=None, vr=None, vd=None, sq=None, sv=None, xlabel=\"X (meters)\", ylabel=\"Y (meters)\", title=None, leg=\"upper left\"):\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n offset = 0.2\n \n px,py = zip(*sensors)\n ax.scatter(px,py)\n if vr is None:\n for i,(x,y) in enumerate(sensors):\n ax.annotate(\"$S_{}$\".format(i), xy=(x,y), xytext=(x, y+offset), ha='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,x,y,0,offset)\n\n if q is not None:\n px,py = q[0], q[1]\n ax.scatter(px,py)\n if v is None:\n ax.annotate(\"$Q$\", xy=(px,py), xytext=(px, py+offset), ha='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,x,y,0,offset)\n\n if v is not None and q is not None:\n vx,vy,vu,vv = [*q,*v]\n ax.quiver(vx,vy,vu,vv, angles='xy', scale_units='xy', scale=1, color='c', alpha=0.5)\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,vx,vy,vu,vv)\n\n pos = q - (v)/la.norm(v)*offset\n ax.annotate(\"$Q$\", xy=tuple(q), xytext=tuple(pos), ha='center', va='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,q[0],q[1],pos[0]-q[0],pos[1]-q[1])\n\n cyan_patch = mpatches.Patch(color='cyan', label='Ideal', alpha=0.5)\n ax.legend(handles=[cyan_patch], loc='upper left')\n\n if vr is not None:\n for i,(s,v_rad) in enumerate(zip(sensors,vr)):\n if v_rad > 0:\n r = (q-s)/la.norm(q-s)\n proj = np.dot(r,v)*r\n vx,vy,vu,vv = [*s,*proj]\n pos = s - proj/la.norm(proj) * offset\n else:\n r = (q-s)/la.norm(q-s)\n proj = np.dot(r,v)*r\n start = s - proj\n vx,vy,vu,vv = [*start,*proj]\n pos = s + proj/la.norm(proj) * offset\n # print(proj)\n \n ax.quiver(vx,vy,vu,vv, angles='xy', scale_units='xy', scale=1, color='c', alpha=0.5)\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,vx,vy,vu,vv)\n\n \n ax.annotate(\"$S_{}$\".format(i), xy=tuple(s), xytext=tuple(pos), ha='center', va='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,s[0],s[1],pos[0]-s[0],pos[1]-s[1])\n\n cyan_patch = mpatches.Patch(color='cyan', label='Ideal', alpha=0.5)\n ax.legend(handles=[cyan_patch], loc='upper left')\n\n if vd is not None:\n for i,(s,v_dop) in enumerate(zip(sensors,vd)):\n if v_dop > 0:\n r = 
(q-s)/la.norm(q-s)\n proj = np.dot(r,v)*r\n vx,vy,vu,vv = [*s,*proj]\n pos = s - proj/la.norm(proj) * offset\n else:\n r = (q-s)/la.norm(q-s)\n proj = np.dot(r,v)*r\n start = s - proj\n vx,vy,vu,vv = [*start,*proj]\n pos = s + proj/la.norm(proj) * offset\n # print(proj)\n \n ax.quiver(vx,vy,vu,vv, angles='xy', scale_units='xy', scale=1, color='m', alpha=0.5)\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,vx,vy,vu,vv)\n\n magenta_patch = mpatches.Patch(color='magenta', label='Simulated', alpha=0.5)\n ax.legend(handles=[cyan_patch, magenta_patch], loc=leg)\n\n if sq is not None: \n px,py = sq[0], sq[1]\n ax.scatter(px,py)\n if sv is None:\n ax.annotate(\"$\\\\hat Q$\", xy=(px,py), xytext=(px, py+offset), ha='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,x,y,0,offset)\n else:\n sq = q\n\n if sv is not None:\n vx,vy,vu,vv = [*sq,*sv]\n ax.quiver(vx,vy,vu,vv, angles='xy', scale_units='xy', scale=1, color='m', alpha=0.5)\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,vx,vy,vu,vv)\n\n if sq is not None:\n pos = sq - (sv)/la.norm(v)*offset\n ax.annotate(\"$\\\\hat Q$\", xy=tuple(q), xytext=tuple(pos), ha='center', va='center')\n xmin,xmax,ymin,ymax = maintain_bounds(xmin,xmax,ymin,ymax,q[0],q[1],pos[0]-q[0],pos[1]-q[1])\n\n magenta_patch = mpatches.Patch(color='magenta', label='Simulated', alpha=0.5)\n ax.legend(handles=[cyan_patch, magenta_patch], loc=leg)\n\n ax.set_xlim(xmin,xmax)\n ax.set_ylim(ymin,ymax)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel) \n if title is not None: ax.set_title(title)\n\ndef plot_error_map(ax, xs, ys, grid, title=\"Heat Map\"):\n ax.imshow(grid)\n ax.set_xticks(np.arange(len(xs)))\n ax.set_yticks(np.arange(len(ys)))\n ax.set_xticklabels([\"{:.2f}\".format(x) for x in xs])\n ax.set_yticklabels([\"{:.2f}\".format(y) for y in ys])\n ax.invert_yaxis()\n for i in range(len(xs)):\n for j in range(len(ys)):\n text = ax.text(j, i, \"{:.2f}\".format(grid[i, j]),\n ha=\"center\", va=\"center\", color=\"w\")\n \n", "sub_path": "plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 7243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 106, "usage_type": 
"call"}, {"api_name": "numpy.linalg.norm", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "520855720", "text": "# -*- encoding:utf-8 -*-\n\nfrom sklearn.gaussian_process import GaussianProcess\nfrom sklearn.cross_validation import cross_val_score, KFold\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom pandas import read_table\nimport sys\nimport time\n\ndef GaussProcess(dataset):\n\n\n train_data = read_table(dataset[0], sep=',')\n valid_data = read_table(dataset[1], sep=',')\n test_data = read_table(dataset[2], sep=',') # no label\n\n trainX = train_data.iloc[:,0:-1]\n validX = valid_data.iloc[:,0:-1]\n trainY = train_data.iloc[:,-1]\n validY = valid_data.iloc[:,-1]\n\n scaler = StandardScaler().fit(trainX) # fit只是计算mean var输出的是一个模型, fit_transform是带变换的,输出带结果\n trainX = scaler.fit_transform(trainX)\n validX = scaler.fit_transform(validX)\n testX = scaler.fit_transform(test_data)\n\n gp = GaussianProcess('quadratic', 'absolute_exponential',\n theta0=[1e-4] * 10, thetaL=[1e-12] * 10,\n thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch',\n random_start=100)\n\n print >> sys.stderr, \"start training Gausian Process\"\n start_time = time.time()\n gp.fit(trainX,trainY)\n print >> sys.stderr, 'SVM train time use:', time.time() - start_time, 'seconds'\n\n valid_, vMSE = gp.predict(validX, eval_MSE=True)\n test_, tMSE = gp.predict(testX, eval_MSE=True)\n\n valid_output = np.hstack((valid_.reshape(valid_.shape[0], 1), vMSE.reshape(vMSE.shape[0], 1)))\n test_output = np.hstack((test_.reshape(test_.shape[0], 1), tMSE.reshape(tMSE.shape[0], 1)))\n\n np.savetxt(\"valid_GP_result.csv\", valid_output, fmt=\",\".join([\"%.6f\"]*2), comments='',header=\"value,variance\")\n np.savetxt(\"test_GP_result.csv\", test_output, fmt=\",\".join([\"%.6f\"]*2), comments='',header=\"value, variance\")\n\n print >> sys.stderr, \"R2 on Valid: %f\" % gp.score(validX, validY)\n\nif __name__ == '__main__':\n 
GaussProcess(dataset=['train.csv','valid.csv','test.csv'])\n\n\n", "sub_path": "Mark/GaussiaProcess_Mark.py", "file_name": "GaussiaProcess_Mark.py", "file_ext": "py", "file_size_in_byte": 1991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_table", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.GaussianProcess", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 33, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 36, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "318339127", "text": "import numpy as np\n# from qiskit import qiskit_code\n# from rigetti import rigetti_code\nfrom pyquil.quil import Program\nfrom pyquil.gates import RX, RZ, CZ, RESET, MEASURE\n\n### This code assumes qubits are labeled 0,1,2,...,N and connected in that order ###\n\n\ndef smart_compile(program, nqubits):\n lineList = [str(instr) for instr in program]\n count = len(lineList)\n\n # Read the gate in right vector form\n # G = Gate type\n # TH = Angle of rotation ! 
if there is no rotation then TH = 0\n # AC1 = qubit on which action is happening\n # AC2 = qubit on which controlled action is happening\n\n G = [\"\" for x in range(count)]\n G = list(G)\n AC1 = np.zeros(shape=(count), dtype=np.int)\n AC2 = np.zeros(shape=(count), dtype=np.int)\n TH = np.zeros(shape=(count))\n for i in range(0, count):\n G[i] = 0\n TH[i] = 0\n AC1[i] = 0\n AC2[i] = 0\n if lineList[i][0:1] == \"H\":\n G[i] = \"H\"\n TH[i] = 0\n AC1[i] = lineList[i][2:3]\n AC2[i] = 0\n if lineList[i][0:2] == \"RZ\":\n G[i] = \"RZ\"\n TH[i] = lineList[i][lineList[i].find(\"(\")+1:lineList[i].find(\")\")]\n AC1[i] = lineList[i][-1]\n AC2[i] = 0\n if lineList[i][0:4] == \"CNOT\":\n G[i] = \"CNOT\"\n TH[i] = 0\n AC1[i] = lineList[i][5:6]\n AC2[i] = lineList[i][7:8]\n if lineList[i][0:7] == \"MEASURE\":\n G[i] = \"MEASURE\"\n TH[i] = 0\n AC1[i] = 0\n AC2[i] = 0\n\n # qiskit_code(G,TH,AC1,AC2,\"qiskit_uncompressed.txt\")\n # rigetti_code(G,TH,AC1,AC2,\"rigetti_uncompressed.txt\")\n\n # Use CNOT = H CZ H\n i = 0\n while G[i] != \"MEASURE\":\n if G[i] == \"CNOT\":\n G[i] = \"CZ\"\n G.insert(i+1, \"H\")\n TH = np.insert(TH, i+1, 0)\n AC1 = np.insert(AC1, i+1, AC2[i])\n AC2 = np.insert(AC2, i+1, 0)\n G.insert(i, \"H\")\n TH = np.insert(TH, i, 0)\n AC1 = np.insert(AC1, i, AC2[i])\n AC2 = np.insert(AC2, i, 0)\n i = i+1\n\n # Last and second to last CNOT can be omitted\n maxq = max(max(AC1), max(AC2))\n remember = np.zeros(shape=(2, maxq), dtype=np.int)\n for mm in range(0, maxq+1):\n i = 0\n while G[i] != \"MEASURE\":\n if G[i] == \"CZ\" and AC1[i] == mm and AC2[i] == mm+1:\n j = i+1\n while G[j] != \"MEASURE\":\n if G[j] == \"CZ\" and AC1[j] == mm and AC2[j] == mm+1:\n remember[0][mm] = i\n remember[1][mm] = j\n j = j+1\n i = i+1\n\n for nn in range(maxq-1, -1, -1):\n for mm in range(1, -1, -1):\n # print(mm,nn)\n del G[remember[mm][nn]]\n TH = np.delete(TH, remember[mm][nn])\n AC1 = np.delete(AC1, remember[mm][nn])\n AC2 = np.delete(AC2, remember[mm][nn])\n\n # Use H*H = I but make sure it can only happen if no gate is\n # present in between\n i = 0\n while G[i] != \"MEASURE\":\n if G[i] == \"H\":\n flag = 0\n #print(G[i],TH[i],AC1[i],AC2[i],\"before start\")\n j = i+1\n while G[j] != \"MEASURE\":\n if ((G[j] == \"CZ\" and AC1[j] == AC1[i]) or (G[j] == \"CZ\" and AC2[j] == AC1[i]) or (G[j] == \"RZ\" and AC1[j] == AC1[i])):\n break\n if G[j] == G[i] and AC1[j] == AC1[i]:\n # print(G[i],TH[i],AC1[i],AC2[i],\"before\")\n del G[j]\n TH = np.delete(TH, j)\n AC1 = np.delete(AC1, j)\n AC2 = np.delete(AC2, j)\n # print(G[i],TH[i],AC1[i],AC2[i],\"after\")\n del G[i]\n TH = np.delete(TH, i)\n AC1 = np.delete(AC1, i)\n AC2 = np.delete(AC2, i)\n flag = 2\n j = j+1\n if flag == 2:\n break\n i = i + 1\n\n # Use CZ H RZ H CZ = RZ(pi/2) CZ RX(pi/2) RZ RX(-pi2) CZ RZ(-pi/2)\n i = 0\n while G[i] != \"MEASURE\":\n if (G[i] == \"CZ\" and G[i+1] == \"H\" and AC2[i] == AC1[i+1] and G[i+2] == \"RZ\" and AC2[i] == AC1[i+2] and G[i+3] == \"H\" and AC2[i] == AC1[i+3] and G[i+4] == \"CZ\" and AC2[i] == AC2[i+4]):\n G[i+1] = \"RX\"\n TH[i+1] = 1.57079632679\n G[i+3] = \"RX\"\n TH[i+3] = -1.57079632679\n G.insert(i+5, \"RZ\")\n TH = np.insert(TH, i+5, -1.57079632679)\n AC1 = np.insert(AC1, i+5, AC2[i])\n AC2 = np.insert(AC2, i+5, 0)\n G.insert(i, \"RZ\")\n TH = np.insert(TH, i, 1.57079632679)\n AC1 = np.insert(AC1, i, AC2[i])\n AC2 = np.insert(AC2, i, 0)\n i = i+1\n\n # Use H = RZ(pi/2) RX(pi/2) RZ(pi/2)\n i = 0\n while G[i] != \"MEASURE\":\n if (G[i] == \"H\"):\n flag = AC1[i]\n G[i] = \"RZ\"\n TH[i] = 1.57079632679\n G.insert(i, 
\"RX\")\n TH = np.insert(TH, i, 1.57079632679)\n AC1 = np.insert(AC1, i, flag)\n AC2 = np.insert(AC2, i, 0)\n G.insert(i, \"RZ\")\n TH = np.insert(TH, i, 1.57079632679)\n AC1 = np.insert(AC1, i, flag)\n AC2 = np.insert(AC2, i, 0)\n i = i+1\n\n # Compress RZ gates\n loop_flag = 0\n for mm in range(0, 1000):\n i = 0\n while G[i] != \"MEASURE\":\n if (G[i] == \"RZ\"):\n j = i+1\n flag = 0\n # print(flag,\"flag\")\n while G[j] != \"MEASURE\":\n if (G[j] == \"RX\" and AC1[j] == AC1[i]):\n flag = 2\n if (G[j] == \"RZ\" and AC1[j] == AC1[i]):\n TH[i] = TH[i]+TH[j]\n del G[j]\n TH = np.delete(TH, j)\n AC1 = np.delete(AC1, j)\n AC2 = np.delete(AC2, j)\n flag = 2\n loop_flag = 3\n j = j+1\n if(flag == 2):\n break\n if (G[i] == \"RZ\" and TH[i] == 0.0):\n del G[i]\n TH = np.delete(TH, i)\n AC1 = np.delete(AC1, i)\n AC2 = np.delete(AC2, i)\n i = i + 1\n if(loop_flag == 0):\n break\n if(mm == 1000 and loop_flag == 3):\n print(\"more RZ compression are left be carefull!!\")\n\n # qiskit_code(G,TH,AC1,AC2,\"qiskit_compressed.txt\")\n # rigetti_code(G,TH,AC1,AC2,\"rigetti_compressed.txt\")\n\n p = Program(RESET()) # compressed program\n ro = p.declare('ro', memory_type='BIT', memory_size=nqubits)\n\n for i in range(len(G)):\n if (G[i] == \"RX\"):\n p.inst(RX(TH[i], int(AC1[i])))\n if (G[i] == \"RZ\"):\n p.inst(RZ(TH[i], int(AC1[i])))\n if (G[i] == \"CZ\"):\n p.inst(CZ(int(AC1[i]), int(AC2[i])))\n for i in range(0, nqubits):\n p.inst(MEASURE(i, ro[i]))\n return p\n", "sub_path": "smart_compile.py", "file_name": "smart_compile.py", "file_ext": "py", "file_size_in_byte": 7047, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 133, "usage_type": "call"}, 
{"api_name": "numpy.insert", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 182, "usage_type": "call"}, {"api_name": "pyquil.quil.Program", "line_number": 192, "usage_type": "call"}, {"api_name": "pyquil.gates.RESET", "line_number": 192, "usage_type": "call"}, {"api_name": "pyquil.gates.RX", "line_number": 197, "usage_type": "call"}, {"api_name": "pyquil.gates.RZ", "line_number": 199, "usage_type": "call"}, {"api_name": "pyquil.gates.CZ", "line_number": 201, "usage_type": "call"}, {"api_name": "pyquil.gates.MEASURE", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "234614095", "text": "import requests\nfrom os.path import join\nimport sys\nimport json\n\n\ndef _raise_error(msg, exit_status=1):\n sys.stderr.write(msg)\n sys.exit(exit_status)\n\n\ndef _get_request_json(url):\n res = requests.get(url)\n if res.status_code != 200:\n msg = 'res.status_code != 200 for GET {0}'.format(res.url)\n if hasattr(res, 'reason'):\n msg += '\\n' + res.reason\n if hasattr(res, 'text'):\n msg += '\\n' + str(res.text)\n _raise_error(msg)\n return res.json()\n\n\ndef _put_request_json(url, data):\n res = requests.put(url, data=data)\n if res.status_code != 200:\n msg = 'res.status_code != 200 for PUT {0}'.format(res.url)\n if hasattr(res, 'reason'):\n msg += '\\n' + res.reason\n if hasattr(res, 'text'):\n msg += '\\n' + str(res.text)\n _raise_error(msg)\n return res.json()\n\n\ndef deregister_failed_nodes_with_no_services(consul_server_url, delete=False, datacenter=None):\n datacenters = _get_request_json(join(consul_server_url, 'v1/catalog/datacenters'))\n\n if len(datacenters) == 0:\n _raise_error('could not find any datacenters')\n\n if datacenter is not None and datacenter not in datacenters:\n _raise_error('could not the datacenter:{0} in {1}'.format(datacenter, datacenters))\n\n if datacenter is None:\n if len(datacenters) != 1:\n _raise_error('more than one dataceneter found, please state which one {0}'.format(datacenter))\n datacenter = datacenters[0]\n\n nodes = _get_request_json(join(consul_server_url, 'v1/catalog/nodes'))\n for node in nodes:\n\n if 'Node' not in node or 'Address' not in node:\n continue\n\n node_key = node['Node']\n node_address = node['Address']\n\n # check node health equals 'critical'\n node_health_checks = _get_request_json(join(consul_server_url, 'v1/health/node/{0}'.format(node_key)))\n if len(node_health_checks) != 1:\n continue\n\n node_health_check = node_health_checks[0]\n if 'Status' not in node_health_check or node_health_check['Status'] != 'critical':\n continue\n\n # check node has no services\n node_info = _get_request_json(join(consul_server_url, 
'v1/catalog/node/{0}'.format(node_key)))\n if 'Services' not in node_info or len(node_info['Services']) != 0:\n continue\n\n # delete / deregister node\n if not delete:\n print('will remove: {0} {1}'.format(node_key, node_address))\n else:\n print('removing: {0} {1}'.format(node_key, node_address))\n _put_request_json(\n url=join(consul_server_url, 'v1/catalog/deregister'),\n data=json.dumps({'Node': node_key, 'Datacenter': datacenter}))\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='Deregisters failed nodes that have no services associated with them')\n parser.add_argument('url', help='http://localhost:8500')\n parser.add_argument('--delete', action=\"store_true\", default=False, help='deregister / delete the node from consul')\n args = parser.parse_args()\n deregister_failed_nodes_with_no_services(args.url, delete=args.delete)\n\n\n", "sub_path": "consul_clean_dead_nodes.py", "file_name": "consul_clean_dead_nodes.py", "file_ext": "py", "file_size_in_byte": 3227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stderr.write", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 80, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "283811418", "text": "from sklearn import datasets\nfrom sklearn.linear_model import LinearRegression\n\nloaded_data = datasets.load_boston()\ndata_X = loaded_data.data\ndata_y = loaded_data.target\n\nmodel = LinearRegression()\nmodel.fit(data_X,data_y)\n\n# y = ax+b\nprint(model.coef_) # print a\nprint(model.intercept_)\nprint(model.get_params()) \nprint(model.score(data_X,data_y)) # ", "sub_path": "tf/morvan/sk/ModelProperty.py", "file_name": "ModelProperty.py", "file_ext": "py", "file_size_in_byte": 356, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sklearn.datasets.load_boston", "line_number": 4, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 4, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "350163947", "text": "import re\n\nimport requests\nfrom mysql.connector import connect, Error\nfrom mysql.connector import Error\nfrom bs4 import BeautifulSoup\n\nfrom utils.consts import Consts\n\n\nclass DBConnection:\n \"\"\"\n The class creates a connection to the database.\n The following three scenarios are considered:\n 1. There is a DB and the DB object is initialized with the name of the DB\n 2. There is no database and the DB object is initialized with a database name\n 3. 
No database name is given and the object connects to the server only\n\n In the __init__ method the port number doesn't get passed,\n and MySQL's default port (3306) is used.\n \"\"\"\n\n def __init__(self, host, user, password, db_name=None):\n self.host = host\n self.db_name = db_name\n self.user = user\n self.password = password\n self.connection = None\n\n def create_connection(self):\n \"\"\"create connection to the database\"\"\"\n if self.db_name:\n try:\n self.connection = connect(\n host=self.host,\n db=self.db_name,\n user=self.user,\n passwd=self.password,\n )\n cursor = self.connection.cursor()\n cursor.execute(f\"CREATE DATABASE IF NOT EXISTS {self.db_name}\")\n cursor.execute(f\"SHOW DATABASES WHERE `Database` LIKE '{self.db_name}'\")\n current_db = cursor.fetchone()[0]\n print(\"You're connected to database: \", current_db)\n except Error as e:\n print(e)\n print(\n f\"Database {self.db_name} does not exist. It is going to be created now...\\n\"\n )\n self.connection = connect(\n host=self.host,\n user=self.user,\n passwd=self.password,\n )\n cursor = self.connection.cursor()\n cursor.execute(f\"CREATE DATABASE IF NOT EXISTS {self.db_name}\")\n cursor.execute(f\"USE {self.db_name}\")\n cursor.execute(\n f\"SHOW DATABASES WHERE `Database` LIKE '{self.db_name}' \"\n )\n current_db = cursor.fetchone()[0]\n print(\"You're connected to database: \", current_db)\n\n else:\n try:\n self.connection = connect(\n host=self.host,\n user=self.user,\n passwd=self.password,\n )\n if self.connection.is_connected():\n db_info = self.connection.get_server_info()\n print(\"Connected to MySQL Server version \", db_info)\n print(\n \"You are not connected to any database.\"\n \"Create/connect to a database by adding db_name=.\"\n )\n except Error as e:\n print(e)\n\n def create_table_query(self, table_name, columns):\n \"\"\"create a table based on MySQL syntax and perform execute and commit at the end\"\"\"\n column_str = f\"({', '.join([f'{col_name} {col_type}' for col_name, col_type in columns.items()])})\"\n cursor = self.connection.cursor()\n cursor.execute(f\"CREATE TABLE IF NOT EXISTS {table_name} {column_str}\")\n self.connection.commit()\n\n def insert_into_query(self, table_name, columns, values):\n \"\"\"\n Insert values into a table based on MySQL syntax,\n and performs executemany and commit at the end.\n\n For simplicity, there is no check if the \"values\" is just a container of length 1.\n Because we are sure all the tables have more than one row during the scraping process.\n \"\"\"\n col_name = f\"({', '.join([f'{c}' for c in columns.keys()])})\"\n col_format = f\"({', '.join([f'{f}' for f in columns.values()])})\"\n cursor = self.connection.cursor()\n ins_str = f\"INSERT INTO {table_name} {col_name} VALUES {col_format}\"\n cursor.executemany(ins_str, values)\n self.connection.commit()\n\n def select_table_query(self, table_name, columns, order_rand=False, limit=None):\n \"\"\"\n Returns the column values of a specific table.\n Pass \"*\" as columns to run a plain \"SELECT * FROM table_name\".\n \"\"\"\n if not columns == \"*\":\n col_name = f\"{', '.join([c for c in columns])}\"\n q = f\"SELECT {col_name} FROM {table_name}\"\n else:\n q = f\"SELECT * FROM {table_name}\"\n if order_rand:\n q += \" ORDER BY RAND()\"\n if limit:\n q += f\" LIMIT {limit}\"\n cursor = self.connection.cursor()\n cursor.execute(q)\n return cursor.fetchall()\n\n def close_connection(self):\n \"\"\"Close the database connection\"\"\"\n if self.connection.is_connected():\n cursor = self.connection.cursor()\n cursor.close()\n # self.connection.close()\n 
print(\"MySQL connection is closed\")\n\n def delete_db(self, db_name):\n \"\"\"Delete a database\"\"\"\n if self.connection.is_connected():\n cursor = self.connection.cursor()\n cursor.execute(f\"DROP DATABASE IF EXISTS {db_name}\")\n print(f\"The database {db_name} is deleted\")\n\n def delete_table(self, tb_name):\n \"\"\"\"Delete a table\"\"\"\n cursor = self.connection.cursor()\n cursor.execute(f\"DROP TABLE IF EXISTS {tb_name}\")\n\n def show_db_name(self):\n cursor = self.connection.cursor()\n cursor.execute(f\"SHOW DATABASES WHERE `database` = '{self.db_name}' \")\n return cursor.fetchone()[0]\n\n def count_total_tables(self):\n cursor = self.connection.cursor()\n cursor.execute(\n f\"SELECT COUNT(*) AS TOTALNUMBEROFTABLES FROM \"\n f\"INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{self.db_name}'\"\n )\n return int(cursor.fetchone()[0])\n\n\ndef name_correct(name):\n \"\"\"Correct the name of chapter/table/arguments\"\"\"\n return re.subn(r\"[\\s+|\\']\", \"_\", re.subn(r\"[,():]\", \"\", name)[0])[0].replace(\n \"km/h\", \"km\"\n )[:50]\n\n\ndef get_chapters_raw(page_url):\n \"\"\"Return BeautifulSoup object of chapter page\"\"\"\n r = requests.get(page_url)\n return BeautifulSoup(r.text, \"lxml\")\n\n\ndef get_chapters(base_url, data):\n \"\"\"Maps chapter names to their dedicated URLs\"\"\"\n return {\n name_correct(chapter.text.strip()): f\"{base_url}{chapter.get('href')}\"\n for chapter in data.find_all(\"a\", class_=\"box\")[1:]\n }\n\n\ndef get_question(current_row):\n \"\"\"Returns the question text value og HTML element\"\"\"\n return current_row.find_all(\"td\", class_=\"domanda\")[0].text\n\n\ndef get_answer(current_row):\n \"\"\"Returns the answer text value og HTML element\"\"\"\n return current_row.find_all(\"td\", class_=\"risp\")[0].text.strip()\n\n\ndef get_all_raw(tbody, head, img=None):\n \"\"\"\n Returns a list of tuples [('/path/img.png', 'question', 'answer'),]\n\n If a table has image(s), but some rows does not have an image,\n the first value of the tuple is None.\n\n If the table does not have an image,\n the tuple just contains question and answer.\n \"\"\"\n table_val = []\n for row in tbody.find_all(\"tr\"):\n if head == 2:\n q, a = get_question(row), get_answer(row)\n table_val.append((q, a))\n else:\n try:\n img = row.td.img.get(\"src\")\n q, a = get_question(row), get_answer(row)\n table_val.append((img, q, a))\n except:\n q, a = get_question(row), get_answer(row)\n table_val.append((img, q, a))\n\n return table_val\n\n\ndef error_log(**kwargs):\n \"\"\"\n Prints useful information related to a table,\n in which there is an error for storing it on the DB.\n \"\"\"\n print(\n f\"*\\n> error: {kwargs['e']}\\n> table_name: {kwargs['table_name']}\\n\"\n f\"> table_value: {kwargs['table_values']}\\n> table_link: {kwargs['url']}\"\n )\n\n\ndef chapter_weigth(chapter, idx, weight):\n return {\n name_correct(args.strip()): weight\n for args in chapter[idx].strings\n if re.search(\"\\w+\", args)\n }\n", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 8176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mysql.connector.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "mysql.connector.Error", "line_number": 45, "usage_type": "name"}, {"api_name": "mysql.connector.connect", "line_number": 50, "usage_type": "call"}, {"api_name": "mysql.connector.connect", "line_number": 66, "usage_type": "call"}, {"api_name": 
"mysql.connector.Error", "line_number": 78, "usage_type": "name"}, {"api_name": "re.subn", "line_number": 157, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 164, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 165, "usage_type": "call"}, {"api_name": "re.search", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "220902281", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom .models import City, Country, State\n\n\ndef filter(request):\n context = {}\n context['country'] = Country.objects.all()\n context['state'] = State.objects.none\n context['city'] = City.objects.none\n return render(request, 'filter.html', context)\n\n\ndef filter_result(request):\n data = Country.objects.all()\n context = {'dataset': data}\n return render(request, 'includes/table_result.html', context)\n\n\ndef country_choices_ajax(request):\n data = Country.objects.all()\n context = {'countries': data}\n return render(request, 'includes/country_choices.html', context)\n\n\ndef state_choices_ajax(request):\n countryId = request.GET['uf']\n countryOb = Country.objects.get(id=countryId)\n data = State.objects.filter(country=countryOb)\n context = {'states': data}\n return render(request, 'includes/state_choices.html', context)\n\n\ndef city_choices_ajax(request):\n stateId = request.GET['uf']\n stateOb = State.objects.get(id=stateId)\n data = City.objects.filter(state=stateOb)\n context = {'cities': data}\n return render(request, 'includes/city_choices.html', context)\n", "sub_path": "filterselect/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "models.Country.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Country.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.Country", "line_number": 9, "usage_type": "name"}, {"api_name": "models.State.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.State", "line_number": 10, "usage_type": "name"}, {"api_name": "models.City.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.City", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Country.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Country.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Country", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Country.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Country.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Country", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Country.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Country.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Country", "line_number": 29, "usage_type": "name"}, {"api_name": "models.State.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "models.State.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.State", "line_number": 30, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "models.State.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.State.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.State", "line_number": 37, "usage_type": "name"}, {"api_name": "models.City.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "models.City.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.City", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "119775926", "text": "from django.db.models.signals import post_save, post_init, pre_save\nfrom django.dispatch import receiver\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\n\n@receiver(post_save, sender=User)\ndef create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_profile(sender, instance, **kwargs):\n instance.profile.save()\n\n@receiver(post_init, sender=Profile)\ndef save_old_image(sender, instance, **kwargs):\n if instance.image:\n instance._old_image = instance.image\n print(instance._old_image)\n\n@receiver(post_save, sender=Profile)\ndef auto_delete_file_on_delete(sender, instance, **kwargs):\n new_image = instance.image\n if instance._old_image != \"default.png\" and instance._old_image != new_image:\n if instance._old_image:\n instance._old_image.delete(save=False)\n instance.image = new_image\n", "sub_path": "users/signals.py", "file_name": "signals.py", "file_ext": "py", "file_size_in_byte": 964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "models.Profile.objects.create", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Profile.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 10, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.contrib.auth.models.User", "line_number": 7, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.contrib.auth.models.User", "line_number": 13, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_init", "line_number": 17, "usage_type": "argument"}, {"api_name": "models.Profile", "line_number": 17, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 23, "usage_type": "argument"}, {"api_name": "models.Profile", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "456850753", "text": "import time\nimport asyncio\n\nstart = time.time()\n\nasync def get_page(site_name):\n if site_name == \"API \":\n await asyncio.sleep(0.1)\n else:\n await asyncio.sleep(0.5)\n print(\"Get pages for {}\".format(site_name))\n return range(1, 4)\n\n\nasync def get_page_data(site_name, page):\n await asyncio.sleep(1)\n return \"Data from page {} ({})\".format(page, site_name)\n\n\nasync def spider(site_name): # добавив 
async before def makes the function asynchronous (a coroutine)\n all_data = []\n pages = await get_page(site_name)\n for page in pages:\n data = await get_page_data(site_name, page)\n all_data.append(data)\n return all_data\n\n\nspiders = [\n asyncio.ensure_future(spider(\"Blog \")), # ensure_future - guarantees execution in the future\n asyncio.ensure_future(spider(\"News \")),\n asyncio.ensure_future(spider(\"Forum \")),\n asyncio.ensure_future(spider(\"API \"))\n]\nevent_loop = asyncio.get_event_loop() # event dispatcher\nresult = event_loop.run_until_complete(asyncio.gather(*spiders))\nprint(result)\nevent_loop.close()\n\nprint(time.time() - start)\n", "sub_path": "2018-2019/learning/Async_lerning/Lession4_R_U_C.py", "file_name": "Lession4_R_U_C.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.time", "line_number": 4, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 10, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 30, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 31, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 32, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 33, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 35, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "303284741", "text": "# -*- coding: utf-8 -*-\n#T. Lemberger, 2018\n\n\nimport argparse\nimport torch\nimport os\nfrom functools import lru_cache\nfrom copy import copy\nfrom typing import List\n\nNBITS = int(os.getenv('NBITS'))\n\nclass Error(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\nclass TStringTypeError(Error):\n \"\"\"\n Exception raised when TString is initialized with something other than a str, a StringList or a Tensor.\n\n Attributes:\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, x):\n super().__init__(f\"Wrong type: only str, StringList or torch.Tensor allowed whereas {x} is of type {type(x)}\")\n\nclass HeterogenousWordLengthError(Error):\n \"\"\"\n Exception raised when StringList is initialized with a list of words that are of varying lengths.\n \"\"\"\n def __init__(self, message):\n super().__init__(message)\n\nclass ConcatenatingTStringWithUnequalDepthError(Error):\n \"\"\"\n Exception raised when 2 TStrings with different depth (number of examples) are concatenated.\n \"\"\"\n def __init__(self, d1, d2):\n super().__init__(f\"Depths of the 2 concatenated TString are not identical ({d1} != {d2}).\")\n\nclass RepeatError(Error):\n \"\"\"\n Exception raised when TString.repeat(0) is called. Rather than returning an empty tensor, raising an exception is preferred as repeating zero times is probably unintended.\n \"\"\"\n def __init__(self, N):\n super().__init__(f\"repeat with N={N} as argument is not allowed. 
N must be int and N > 0\")\n\nclass Converter():\n \"\"\"\n Conversion operations between strings and tensors.\n \"\"\"\n def __init__(self,):\n self.dtype = dtype\n\n def encode(self, input_string:str) -> torch.Tensor:\n raise NotImplementedError\n\n def decode(self, t: torch.Tensor) -> str:\n raise NotImplementedError\n\n\nclass ConverterNBITS(Converter):\n\n\n def __init__(self, nbits):\n self.nbits = nbits\n\n @lru_cache(maxsize=1024)\n def code2bits(self, code):\n bits = torch.Tensor([code >> i & 1 for i in range(self.nbits)])\n return bits\n\n def encode(self, input_string: str, dtype:torch.dtype=torch.float) -> torch.Tensor:\n \"\"\"\n Encodes an input string into a 3D tensor.\n\n Args:\n input_string (str): string to convert\n\n Returns:\n (torch.Tensor): 3D tensor 1 x NBITS x L, (1 example x NBITS bits x L characters) representing characters as NBITS features\n \"\"\"\n\n L = len(input_string)\n t = torch.Tensor(0)\n if L > 0:\n t = torch.zeros(1, self.nbits, L, dtype=dtype)\n for i in range(L):\n code = ord(input_string[i])\n try:\n t[0, : , i] = self.code2bits(code)\n except IndexError:\n t[0, : , i] = self.code2bits(ord('?'))\n return t\n\n def decode(self, t: torch.Tensor) -> str:\n \"\"\"\n Decodes a 3D tensor into a unicode string.\n\n Args:\n t (torch.Tensor): 3D tensor 1xNBITSxL (1 example x NBITS bits x L characters) representing characters as NBITS features\n \n Returns:\n (str): resulting string\n \"\"\"\n\n L = t.size(2)\n str = \"\"\n for i in range(L):\n code = 0\n for j in range(self.nbits):\n bit = t[0, j, i]\n try:\n bit = int(bit)\n except Exception as e:\n print(f\"{e} in converter module.\")\n print(e)\n bit = 0\n code += bit*(2**j)\n try:\n str += chr(code) #python 2: unichr()\n except ValueError:\n str += '?'\n except OverflowError:\n print(\"Error: code too large\", code)\n print(type(code))\n print(bin(code))\n print(t[0, : , i].view(-1))\n str += '?'\n return str\n\nCONVERTER = ConverterNBITS(NBITS)\n\nclass StringList:\n \"\"\"\n A class to represent a list of strings that MUST all be of identical length.\n The identical length lends itself for straight forward conversion to a tensor once encoded.\n\n Raises:\n HeterogenousWordLengthError: when the strings are of heterogenous lengths or all empty.\n \"\"\"\n\n\n def __init__(self, x: List[str]=[]):\n self._N = 0\n self._L = 0\n self._list = []\n if x:\n self._N = len(x)\n x_0 = len(x[0])\n total = len(\"\".join([e for e in x]))\n if total != self._N * x_0 or x_0 == 0:\n raise HeterogenousWordLengthError(f\"{x}: all the words have to have the same length in a StringList so that they can be stacked into same tensor when converted.\")\n self._L = x_0\n self._list = x\n\n @property\n def words(self) -> List:\n return self._list\n\n def __len__(self) -> int:\n return self._L\n\n @property\n def depth(self) -> int:\n return self._N\n\n def __add__(self, x: 'StringList') -> 'StringList':\n result = StringList([a + b for a, b in zip(self.words, x.words)])\n return result\n\n def __getitem__(self, i: int) -> str:\n result = self.words[i]\n return result\n\n def __repr__(self) -> str:\n result = \" | \".join(self.words)\n return result\n\n def __nonzero__(self) -> bool:\n return len(self) > 0\n\n def clone(self) -> 'StringList':\n cloned = StringList(copy(self.words))\n return cloned\n\n\nclass TString:\n '''\n Class to represent strings simultaneously as Tensor as a list of str. 
\n The number of features used to encode one character is NBITS.\n A list of N strings of homogeneous length L is encoded into a N x NBITS x L 3D Tensor.\n\n Args:\n x: either a list of strings, in which case it is converted into the corresponding Tensor;\n or a Tensor, in which case it does not need conversion but needs to be 3D with N x NBITS x L.\n If no argument is provided, TString is initialized with an empty string.\n\n Methods:\n toStringList: string list representation of TString\n __len__(): length with len(TString) and returns int\n __add__(TString): concatenates TString and returns a TString; allows operations like tstring_1 + tstring_2\n __getitem__(i): gets the i-th element of each string of the list and of the underlying tensor and returns a TString; allows slicing with tstring[start:stop]\n repeat(N): repeats the TString N times\n tensor: returns the 3D (N x NBITS x L) torch.Tensor representation of the encoded list of strings\n '''\n\n def __init__(self, x = StringList(), dtype:torch.dtype=torch.float):\n self.dtype = dtype\n self.nbits = CONVERTER.nbits\n self._t = torch.zeros([], dtype=self.dtype) # empty tensor\n self._s = []\n self._L = 0 # length\n self._N = 0 # number of strings in the list, or depth\n if isinstance(x, str):\n x = StringList([x]) if x else StringList()\n if isinstance(x, torch.Tensor):\n assert x.dim() == 3 and x.size(1) == self.nbits\n self._t = x\n self._L = self._t.size(2)\n self._N = self._t.size(0)\n for i in range(self.depth):\n s = CONVERTER.decode(x[i:i+1, :, : ])\n self._s.append(s)\n elif isinstance(x, StringList):\n if x:\n self._s = x.words\n self._N = x.depth\n t_list = [CONVERTER.encode(ex, dtype=dtype) for ex in x]\n self._t = torch.cat(t_list, 0)\n self._L = self._t.size(2)\n assert self._N == self._t.size(0)\n else:\n raise TStringTypeError(x)\n\n\n\n def __str__(self):\n return \"\\n\".join(self._s)\n \n def toStringList(self) -> StringList:\n return StringList(self._s) # slight overhead due to checks of homogeneous length\n\n @property\n def words(self) -> List[str]:\n return self._s # more direct\n\n @property\n def stringList(self) -> StringList:\n return self.toStringList()\n\n def __len__(self) -> int:\n return self._L\n\n @property\n def depth(self) -> int:\n return self._N\n\n def __add__(self, x: 'TString') -> 'TString':\n # overwrites tensor adding operator to make it a tensor concatenation like for strings\n # what to do when both are empty?\n if len(x) == 0:\n return self # or should it return a cloned self?\n elif len(self) == 0:\n return x # or should it return a cloned x?\n else:\n try:\n assert self.depth == x.depth\n except AssertionError:\n raise ConcatenatingTStringWithUnequalDepthError(self.depth, x.depth)\n concatenated = TString(dtype=self.dtype)\n concatenated._t = torch.cat((self.tensor, x.tensor), 2)\n concatenated._s = [a + b for a, b in zip(self.words, x.words)]\n concatenated._L = len(self) + len(x)\n concatenated._N = self._N\n return concatenated\n\n def __getitem__(self, i: int) -> 'TString':\n if len(self) == 0:\n return TString()\n else:\n item = TString(dtype=self.dtype)\n item._s = [s[i] for s in self.words]\n item._t = self.toTensor()[ : , : , i:i+1]\n item._L = 1\n item._N = self._N\n return item\n\n def repeat(self, N: int) -> 'TString':\n if N == 0 or not isinstance(N, int): \n raise RepeatError(N)\n if N == 1:\n return self\n else:\n repeated = TString(dtype=self.dtype)\n repeated._t = self.toTensor().repeat(1, 1, N) \n repeated._s = [w * N for w in self.words]\n repeated._L = len(self) * N\n repeated._N 
= self._N\n return repeated\n\n @property\n def tensor(self) -> torch.Tensor:\n return self._t\n\n def toTensor(self) -> torch.Tensor: # legacy method\n return self._t\n\n\ndef self_test(input_string: str):\n encoded = CONVERTER.encode(input_string)\n decode_encoded = CONVERTER.decode(encoded)\n assert input_string == decode_encoded, f\"{input_string}<>{decode_encoded}\"\n print(\"the decoded of the encoded:\", TString(TString(StringList([input_string, input_string])).tensor).toStringList())\n\n a = TString(\"a\")\n b = TString(\"b\")\n assert (a + b).toStringList().words == StringList([\"ab\"]).words\n print(TString(StringList(['12345', '67890', 'abcde'])))\n print(\"It seems it works!\")\n\ndef main():\n # more systematic tests in test.test_converter\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=\"conversion\", formatter_class=formatter_class)\n parser.add_argument('input_string', nargs='?', default= \"αβγ∂this is so ☾😎 😎 L ‼️\" + u'\\uE000', help=\"The string to convert\")\n args = parser.parse_args()\n input_string = args.input_string\n self_test(input_string)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "toolbox/converter.py", "file_name": "converter.py", "file_ext": "py", "file_size_in_byte": 11108, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 72, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.dtype", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.float", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 98, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 160, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.dtype", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.float", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 219, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 232, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 305, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 308, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 326, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 327, "usage_type": "call"}]} +{"seq_id": "582294176", "text": "\nimport sqlite3, logging, random, string\nlogger = logging.getLogger(__name__)\n\nclass Database:\n def __init__(self, channel):\n self.db_name = f\"MarkovChain_{channel.replace('#', '').lower()}.db\"\n self._execute_queue = []\n\n # TODO: 
Punctuation insensitivity.\n # My ideas for such an implementation have increased the generation time by ~5x. \n # This was not worth it for me. I may revisit this at some point.\n\n # If an old version of the Database is used, update the database\n if (\"MarkovGrammarA\",) in self.execute(\"SELECT name FROM sqlite_master WHERE type='table';\", fetch=True):\n \n logger.info(\"Creating backup before updating Database...\")\n # Connect to both the new and backup, backup, and close both\n def progress(status, remaining, total):\n logging.debug(f'Copied {total-remaining} of {total} pages...')\n conn = sqlite3.connect(f\"MarkovChain_{channel.replace('#', '').lower()}.db\")\n back_conn = sqlite3.connect(f\"MarkovChain_{channel.replace('#', '').lower()}_backup.db\")\n with back_conn:\n conn.backup(back_conn, pages=1000, progress=progress)\n conn.close()\n back_conn.close()\n logger.info(\"Created backup before updating Database...\")\n \n logger.info(\"Updating Database to new version for improved efficiency...\")\n\n # Rename ...Other to ..._\n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovStart_ (\n word1 TEXT COLLATE NOCASE, \n word2 TEXT COLLATE NOCASE, \n occurances INTEGER, \n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY)\n );\n \"\"\")\n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovGrammar_ (\n word1 TEXT COLLATE NOCASE,\n word2 TEXT COLLATE NOCASE,\n word3 TEXT COLLATE NOCASE,\n occurances INTEGER,\n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)\n );\n \"\"\")\n self.execute_commit()\n\n # Copy data from Other to _ and remove Other\n self.add_execute_queue(\"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammarOther;\")\n self.add_execute_queue(\"INSERT INTO MarkovStart_ SELECT * FROM MarkovStartOther;\")\n self.add_execute_queue(\"DROP TABLE MarkovGrammarOther\")\n self.add_execute_queue(\"DROP TABLE MarkovStartOther\")\n self.execute_commit()\n\n # Copy all data from MarkovGrammarx where x is some digit to MarkovGrammar_, \n # Same with MarkovStart.\n for character in (list(string.digits)):\n self.add_execute_queue(f\"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammar{character}\")\n self.add_execute_queue(f\"DROP TABLE MarkovGrammar{character}\")\n self.add_execute_queue(f\"INSERT INTO MarkovStart_ SELECT * FROM MarkovStart{character}\")\n self.add_execute_queue(f\"DROP TABLE MarkovStart{character}\")\n self.execute_commit()\n\n # Split up MarkovGrammarA into MarkovGrammarAA, MarkovGrammarAB, etc.\n for first_char in list(string.ascii_uppercase) + [\"_\"]:\n for second_char in list(string.ascii_uppercase):\n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (\n word1 TEXT COLLATE NOCASE,\n word2 TEXT COLLATE NOCASE,\n word3 TEXT COLLATE NOCASE,\n occurances INTEGER,\n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)\n );\n \"\"\")\n self.add_execute_queue(f\"INSERT INTO MarkovGrammar{first_char}{second_char} SELECT * FROM MarkovGrammar{first_char} WHERE word2 LIKE \\\"{second_char}%\\\";\")\n self.add_execute_queue(f\"DELETE FROM MarkovGrammar{first_char} WHERE word2 LIKE \\\"{second_char}%\\\";\")\n \n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}_ (\n word1 TEXT COLLATE NOCASE,\n word2 TEXT COLLATE NOCASE,\n word3 TEXT COLLATE NOCASE,\n occurances INTEGER,\n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)\n );\n \"\"\")\n 
self.add_execute_queue(f\"INSERT INTO MarkovGrammar{first_char}_ SELECT * FROM MarkovGrammar{first_char};\")\n self.add_execute_queue(f\"DROP TABLE MarkovGrammar{first_char}\")\n self.execute_commit()\n \n logger.info(\"Finished Updating Database to new version.\")\n\n # Resolve typo in Database\n if self.execute(\"SELECT * FROM PRAGMA_TABLE_INFO('MarkovGrammarAA') WHERE name='occurances';\", fetch=True):\n logger.info(\"Updating Database to new version...\")\n for first_char in list(string.ascii_uppercase) + [\"_\"]:\n for second_char in list(string.ascii_uppercase) + [\"_\"]:\n self.execute(f\"ALTER TABLE MarkovGrammar{first_char}{second_char} RENAME COLUMN occurances TO count;\")\n self.execute(f\"ALTER TABLE MarkovStart{first_char} RENAME COLUMN occurances TO count;\")\n logger.info(\"Finished Updating Database to new version.\")\n\n for first_char in list(string.ascii_uppercase) + [\"_\"]:\n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovStart{first_char} (\n word1 TEXT COLLATE NOCASE, \n word2 TEXT COLLATE NOCASE, \n count INTEGER, \n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY)\n );\n \"\"\")\n for second_char in list(string.ascii_uppercase) + [\"_\"]:\n self.add_execute_queue(f\"\"\"\n CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (\n word1 TEXT COLLATE NOCASE,\n word2 TEXT COLLATE NOCASE,\n word3 TEXT COLLATE NOCASE,\n count INTEGER,\n PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)\n );\n \"\"\")\n sql = \"\"\"\n CREATE TABLE IF NOT EXISTS WhisperIgnore (\n username TEXT,\n PRIMARY KEY (username)\n );\n \"\"\"\n self.add_execute_queue(sql)\n self.execute_commit()\n\n # Used for randomly picking a Markov Grammar if only one word is given\n # Index 0 is for \"A\", 1 for \"B\", and 26 for everything else\n self.word_frequency = [11.6, 4.4, 5.2, 3.1, 2.8, 4, 1.6, 4.2, 7.3, 0.5, 0.8, 2.4, 3.8, 2.2, 7.6, 4.3, 0.2, 2.8, 6.6, 15.9, 1.1, 0.8, 5.5, 0.1, 0.7, 0.1, 0.5]\n \n def add_execute_queue(self, sql, values=None):\n if values is not None:\n self._execute_queue.append([sql, values])\n else:\n self._execute_queue.append([sql])\n # Commit these executes if there are more than 25 queries\n if len(self._execute_queue) > 25:\n self.execute_commit()\n \n def execute_commit(self, fetch=False):\n if self._execute_queue:\n with sqlite3.connect(self.db_name) as conn:\n cur = conn.cursor()\n cur.execute(\"begin\")\n for sql in self._execute_queue:\n cur.execute(*sql)\n self._execute_queue.clear()\n cur.execute(\"commit\")\n if fetch:\n return cur.fetchall()\n\n def execute(self, sql, values=None, fetch=False):\n with sqlite3.connect(self.db_name) as conn:\n cur = conn.cursor()\n if values is None:\n cur.execute(sql)\n else:\n cur.execute(sql, values)\n conn.commit()\n if fetch:\n return cur.fetchall()\n \n def get_suffix(self, character):\n if character.lower() in (string.ascii_lowercase):\n return character.upper()\n return \"_\"\n\n def add_whisper_ignore(self, username):\n self.execute(\"INSERT OR IGNORE INTO WhisperIgnore(username) SELECT ?\", (username,))\n \n def check_whisper_ignore(self, username):\n return self.execute(\"SELECT username FROM WhisperIgnore WHERE username = ?;\", (username,), fetch=True)\n\n def remove_whisper_ignore(self, username):\n self.execute(\"DELETE FROM WhisperIgnore WHERE username = ?\", (username,))\n\n def check_equal(self, l):\n # Check if a list contains of items that are all identical\n return not l or l.count(l[0]) == len(l)\n\n def get_next(self, index, words):\n # Get all 
items\n data = self.execute(f\"SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])} WHERE word1 = ? AND word2 = ?;\", words, fetch=True)\n # Return a word picked from the data, using count as a weighting factor\n return None if len(data) == 0 else self.pick_word(data, index)\n\n def get_next_initial(self, index, words):\n # Get all items\n data = self.execute(f\"SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])} WHERE word1 = ? AND word2 = ? AND word3 != '';\", words, fetch=True)\n # Return a word picked from the data, using count as a weighting factor\n return None if len(data) == 0 else self.pick_word(data, index)\n \n \"\"\"\n def get_next_single(self, index, word):\n # Get all items\n data = self.execute(f\"SELECT word2, count FROM MarkovGrammar{self.get_suffix(word[0])} WHERE word1 = ?;\", (word,), fetch=True)\n # Return a word picked from the data, using count as a weighting factor\n return None if len(data) == 0 else [word] + [self.pick_word(data, index)]\n \"\"\"\n \n def get_next_single_initial(self, index, word):\n # Get all items\n data = self.execute(f\"SELECT word2, count FROM MarkovGrammar{self.get_suffix(word[0])}{random.choices(string.ascii_uppercase + '_', weights=self.word_frequency)[0]} WHERE word1 = ? AND word2 != '';\", (word,), fetch=True)\n # Return a word picked from the data, using count as a weighting factor\n return None if len(data) == 0 else [word] + [self.pick_word(data, index)]\n\n def get_next_single_start(self, word):\n # Get all items\n data = self.execute(f\"SELECT word2, count FROM MarkovStart{self.get_suffix(word[0])} WHERE word1 = ?;\", (word,), fetch=True)\n # Return a word picked from the data, using count as a weighting factor\n return None if len(data) == 0 else [word] + [self.pick_word(data)]\n\n def pick_word(self, data, index=0):\n # Pick a random starting key from a weighted list\n # Note that the values are weighted based on index.\n return random.choices(data, weights=[tup[1] * ((index+1)/15) if tup[0] == \"\" else tup[1] for tup in data])[0][0]\n\n def get_start(self):\n # Find one character start from\n character = random.choice(list(string.ascii_lowercase) + [\"_\"])\n\n # Get all items\n data = self.execute(f\"SELECT * FROM MarkovStart{character};\", fetch=True)\n \n # Add each item \"count\" times\n start_list = [list(tup[:-1]) for tup in data for _ in range(tup[-1])]\n \n # If nothing has ever been said\n if len(start_list) == 0:\n return []\n\n # Pick a random starting key from this weighted list\n return random.choice(start_list)\n\n def add_rule_queue(self, item):\n # Filter out recursive case.\n if self.check_equal(item):\n return False\n if \"\" in item: #prevent adding invalid rules. Ideally this wouldn't trigger, but it seems to happen rarely.\n logger.warning(f\"Failed to add item to rules. Item contains empty string: {item}\")\n return False\n self.add_execute_queue(f'INSERT OR REPLACE INTO MarkovGrammar{self.get_suffix(item[0][0])}{self.get_suffix(item[1][0])} (word1, word2, word3, count) VALUES (?, ?, ?, coalesce((SELECT count + 1 FROM MarkovGrammar{self.get_suffix(item[0][0])}{self.get_suffix(item[1][0])} WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY AND word3 = ? 
COLLATE BINARY), 1))', values=item + item)\n return True\n \n def add_start_queue(self, item):\n self.add_execute_queue(f'INSERT OR REPLACE INTO MarkovStart{self.get_suffix(item[0][0])} (word1, word2, count) VALUES (?, ?, coalesce((SELECT count + 1 FROM MarkovStart{self.get_suffix(item[0][0])} WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY), 1))', values=item + item)\n \n def unlearn(self, message):\n words = message.split(\" \")\n tuples = [(words[i], words[i+1], words[i+2]) for i in range(0, len(words) - 2)]\n # Unlearn start of sentence from MarkovStart\n if len(words) > 1:\n # Reduce \"count\" by 5\n self.add_execute_queue(f'UPDATE MarkovStart{self.get_suffix(words[0][0])} SET count = count - 5 WHERE word1 = ? AND word2 = ?;', values=(words[0], words[1], ))\n # Delete if count is now less than 0.\n self.add_execute_queue(f'DELETE FROM MarkovStart{self.get_suffix(words[0][0])} WHERE word1 = ? AND word2 = ? AND count <= 0;', values=(words[0], words[1], ))\n # Unlearn all 3 word sections from Grammar\n for (word1, word2, word3) in tuples:\n # Reduce \"count\" by 5\n self.add_execute_queue(f'UPDATE MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])} SET count = count - 5 WHERE word1 = ? AND word2 = ? AND word3 = ?;', values=(word1, word2, word3, ))\n # Delete if count is now less than 0.\n self.add_execute_queue(f'DELETE FROM MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])} WHERE word1 = ? AND word2 = ? AND word3 = ? AND count <= 0;', values=(word1, word2, word3, ))\n self.execute_commit()\n", "sub_path": "Database.py", "file_name": "Database.py", "file_ext": "py", "file_size_in_byte": 14285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 60, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 68, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 69, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 100, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 101, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 106, "usage_type": "attribute"}, {"api_name": "string.ascii_uppercase", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 149, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 160, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 171, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 210, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 210, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 223, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 227, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 227, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "192587266", "text": "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom nltk import tokenize\n\n\ndef classifyscore(score):\n if score <= 
-0.05:\n translatedScore = 'negative'\n elif -0.05 < score < 0.05:\n translatedScore = 'neutral'\n else:\n translatedScore = 'positive'\n return translatedScore\n\n\ndef main():\n with open('input.txt', 'r') as file:\n data = file.read().replace('\\n', ' ')\n\n sentimentAnalyzer = SentimentIntensityAnalyzer()\n\n splitText = tokenize.sent_tokenize(data)\n\n # set to False for visibility, set to true if you need to see the sentences that the nltk library considers sentences\n displayEverySingleLine = False\n\n avgSentimentOfTextLines = 0\n numOfLines = 0\n for line in splitText:\n score = sentimentAnalyzer.polarity_scores(line)\n avgSentimentOfTextLines += score['compound']\n numOfLines += 1\n if displayEverySingleLine:\n print(\"Sentimental analysis of the sentence: \" + line + str(score) + \", Overall it is \" + classifyscore(score['compound']) + \".\")\n else:\n print(\"Sentimental analysis of the sentence: \" + str(\n sentimentAnalyzer.polarity_scores(line)) + \", Overall it is \" + classifyscore(score['compound']) + \".\")\n\n print()\n\n print(\"The average score of all the lines is \" + str(avgSentimentOfTextLines/numOfLines) + \" which means the whole text is \" + classifyscore(avgSentimentOfTextLines/numOfLines) + \".\")\n\n sentimentOfDataFile = sentimentAnalyzer.polarity_scores(data)\n\n print(\"Sentimental analysis of the whole text(input.txt): \" + str(sentimentOfDataFile) + \", Overall the whole text is \" + classifyscore(sentimentOfDataFile['compound']) + \" even when passed to the sentiment analyzer as a block of text.\")\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 19, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "225512270", "text": "import cv2\nfrom time import sleep\nfrom os.path import dirname\nimport numpy as np\n\n\nface_cascade = cv2.CascadeClassifier(dirname(cv2.__file__) + '\\\\data\\\\haarcascade_frontalface_default.xml')\n\n\ndef write_conturs(_img, x1, x2, y1, y2, _type):\n\n\timg = _img[x1:x2, y1:y2]\n\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\tcolor = []\n\n\tfor x in range(len(gray)):\n\t\tfor y in range(len(gray[0])):\n\t\t\tcolor.append(gray[x, y])\n\n\tcolor = sum(color) / len(color)\n\n\tif _type == 'nose':\n\t\t_min = np.array((color - 80), np.uint8)\n\t\t_max = np.array((color - 50), np.uint8)\n\telif _type == 'eye':\n\t\t_min = np.array((color - 50), np.uint8)\n\t\t_max = np.array((color - 35), np.uint8)\n\n\tthresh = cv2.inRange(gray, _min, _max)\n\tcontours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\trects_of_contour = list(map(cv2.boundingRect, contours))\n\tmax_rect = max(rects_of_contour, key=lambda x: x[0] + x[2])\n\tx11, y11, x12, y12 = max_rect[0] + max_rect[2] - 1, max_rect[1] + max_rect[3] -1, max_rect[0] + max_rect[2], max_rect[1] + max_rect[3]\n\n\n\tcv2.drawContours(img, contours, -1, (255, 0, 0), 3)\n\tcv2.rectangle(img, (x11, y11), (x12, y12), (0, 255, 255), 2)\n\t\n\tfor x in range(len(img)):\n\t\tfor y in range(len(img[0])):\n\t\t\t_img[x1 + x, y1 + y] = img[x, y]\n\n\ndef find(file):\n\n\tif type(file) == str:\n\t\timg = 
cv2.imread(file)\n\telse:\n\t\timg = file\n\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\tfor (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):\n\n\t\t# face frame\n\t\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n\n\t\t# nose frame\n\t\tx1, y1, x2, y2 = x + (w // 9) * 3, y + (h // 9) * 5, x + (w // 9) * 6, y + (h // 11) * 8\n\t\twrite_conturs(img, y1, y2, x1, x2, 'nose')\n\t\tcv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n\t\t# middle of face frame\n\t\tcv2.rectangle(img, (x + (w // 2), y), (x + (w // 2), y + h), (0, 255, 255), 2)\n\n\t\t# eyes frame\n\t\t(x11, y11), (x12, y12) = (x + (w // 13) * 2, y + (h // 17) * 6), (x + (w // 11) * 5, y + (h // 11) * 5)\n\t\t(x21, y21), (x22, y22) = (x + (w // 11) * 6, y + (h // 17) * 6), (x + (w // 13) * 11, y + (h // 11) * 5)\n\t\twrite_conturs(img, y11, y12, x11, x12, 'eye')\n\t\twrite_conturs(img, y21, y22, x21, x22, 'eye')\n\t\tcv2.rectangle(img, (x11, y11), (x12, y12), (0, 0, 0), 2)\n\t\tcv2.rectangle(img, (x21, y21), (x22, y22), (0, 0, 0), 2)\n\n\treturn img\n", "sub_path": "main/face_finder.py", "file_name": "face_finder.py", "file_ext": "py", "file_size_in_byte": 2322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.__file__", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "166765312", "text": "\nfrom __future__ import print_function\n\nimport psycopg2\nimport os, sys\nimport logging\nimport time\n\nfrom dbt.compilation import Linker, 
Compiler\nfrom dbt.templates import BaseCreateTemplate\nfrom dbt.targets import RedshiftTarget\nfrom dbt.source import Source\nfrom dbt.utils import find_model_by_name\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n\nSCHEMA_PERMISSION_DENIED_MESSAGE = \"\"\"The user '{user}' does not have sufficient permissions to create the schema '{schema}'.\nEither create the schema manually, or adjust the permissions of the '{user}' user.\"\"\"\n\nRELATION_PERMISSION_DENIED_MESSAGE = \"\"\"The user '{user}' does not have sufficient permissions to create the model '{model}' in the schema '{schema}'.\nPlease adjust the permissions of the '{user}' user on the '{schema}' schema.\nWith a superuser account, execute the following commands, then re-run dbt.\n\ngrant usage, create on schema \"{schema}\" to \"{user}\";\ngrant select on all tables in schema \"{schema}\" to \"{user}\";\"\"\"\n\nRELATION_NOT_OWNER_MESSAGE = \"\"\"The user '{user}' does not have sufficient permissions to drop the model '{model}' in the schema '{schema}'.\nThis is likely because the relation was created by a different user. Either delete the model \"{schema}\".\"{model}\" manually,\nor adjust the permissions of the '{user}' user in the '{schema}' schema.\"\"\"\n\nclass RunModelResult(object):\n def __init__(self, model, error=None, skip=False):\n self.model = model\n self.error = error\n self.skip = skip\n\n @property\n def errored(self):\n return self.error is not None\n\n @property\n def skipped(self):\n return self.skip\n\nclass Runner:\n def __init__(self, project, target_path, run_mode):\n self.logger = logging.getLogger(__name__)\n self.project = project\n self.target_path = target_path\n self.run_mode = run_mode\n\n def get_compiled_models(self):\n return Source(self.project).get_compiled(self.target_path, self.run_mode)\n\n def get_target(self):\n target_cfg = self.project.run_environment()\n return RedshiftTarget(target_cfg)\n\n def deserialize_graph(self):\n linker = Linker()\n base_target_path = self.project['target-path']\n filename = 'graph-{}.yml'.format(self.run_mode)\n graph_file = os.path.join(base_target_path, filename)\n linker.read_graph(graph_file)\n\n return linker\n\n def create_schema(self, schema_name):\n target = self.get_target()\n with target.get_handle() as handle:\n with handle.cursor() as cursor:\n cursor.execute('create schema if not exists \"{}\"'.format(schema_name))\n\n def get_schemas(self):\n target = self.get_target()\n existing = []\n with target.get_handle() as handle:\n with handle.cursor() as cursor:\n cursor.execute('select nspname from pg_catalog.pg_namespace')\n\n existing = [name for (name,) in cursor.fetchall()]\n return existing\n\n def create_schema_or_exit(self, schema_name):\n\n target_cfg = self.project.run_environment()\n user = target_cfg['user']\n\n try:\n self.create_schema(schema_name)\n except psycopg2.ProgrammingError as e:\n if \"permission denied for\" in e.diag.message_primary:\n raise RuntimeError(SCHEMA_PERMISSION_DENIED_MESSAGE.format(schema=schema_name, user=user))\n else:\n raise e\n\n def query_for_existing(self, target, schema):\n sql = \"\"\"\n select tablename as name, 'table' as type from pg_tables where schemaname = '{schema}'\n union all\n select viewname as name, 'view' as type from pg_views where schemaname = '{schema}' \"\"\".format(schema=schema)\n\n\n with target.get_handle() as handle:\n with handle.cursor() as cursor:\n cursor.execute(sql)\n existing = [(name, relation_type) for (name, relation_type) in cursor.fetchall()]\n\n return 
dict(existing)\n\n def get_drop_statement(self, schema, relation, relation_type):\n return 'drop {relation_type} if exists \"{schema}\".\"{relation}\" cascade'.format(schema=schema, relation_type=relation_type, relation=relation)\n\n def drop(self, target, model, relation, relation_type):\n sql = self.get_drop_statement(target.schema, relation, relation_type)\n self.logger.info(\"dropping %s %s.%s\", relation_type, target.schema, relation)\n self.execute_and_handle_permissions(target, sql, model, relation)\n self.logger.info(\"dropped %s %s.%s\", relation_type, target.schema, relation)\n\n def __do_execute(self, target, sql, model):\n with target.get_handle() as handle:\n with handle.cursor() as cursor:\n try:\n self.logger.debug(\"SQL: %s\", sql)\n pre = time.time()\n cursor.execute(sql)\n post = time.time()\n self.logger.debug(\"SQL status: %s in %d seconds\", cursor.statusmessage, post-pre)\n except Exception as e:\n e.model = model\n self.logger.exception(\"Error running SQL: %s\", sql)\n raise e\n\n def drop_models(self, models):\n target = self.get_target()\n\n existing = self.query_for_existing(target, target.schema);\n for model in models:\n model_name = model.fqn[-1]\n self.drop(target, model, model.name, existing[model_name])\n\n def get_model_by_fqn(self, models, fqn):\n for model in models:\n if tuple(model.fqn) == tuple(fqn):\n return model\n raise RuntimeError(\"Couldn't find a compiled model with fqn: '{}'\".format(fqn))\n\n def execute_wrapped_model(self, data):\n target = data['target']\n model = data['model']\n tmp_drop_type = data['tmp_drop_type']\n final_drop_type = data['final_drop_type']\n\n error = None\n try:\n self.execute_model(target, model, tmp_drop_type, final_drop_type)\n except (RuntimeError, psycopg2.ProgrammingError) as e:\n error = \"Error executing {filepath}\\n{error}\".format(filepath=model.filepath, error=str(e).strip())\n\n return RunModelResult(model, error=error)\n\n def execute_and_handle_permissions(self, target, query, model, model_name):\n try:\n self.__do_execute(target, query, model)\n except psycopg2.ProgrammingError as e:\n error_data = {\"model\": model_name, \"schema\": target.schema, \"user\": target.user}\n if 'must be owner of relation' in e.diag.message_primary:\n raise RuntimeError(RELATION_NOT_OWNER_MESSAGE.format(**error_data))\n elif \"permission denied for\" in e.diag.message_primary:\n raise RuntimeError(RELATION_PERMISSION_DENIED_MESSAGE.format(**error_data))\n else:\n raise e\n\n def rename(self, target, model):\n rename_query = model.rename_query(target.schema)\n self.logger.info(\"renaming model %s.%s --> %s.%s\", target.schema, model.tmp_name(), target.schema, model.name)\n self.execute_and_handle_permissions(target, rename_query, model, model.name)\n self.logger.info(\"renamed model %s.%s --> %s.%s\", target.schema, model.tmp_name(), target.schema, model.name)\n\n def execute_model(self, target, model, tmp_drop_type, final_drop_type):\n self.logger.info(\"executing model %s\", model)\n\n if tmp_drop_type is not None:\n self.drop(target, model, model.tmp_name(), tmp_drop_type)\n\n self.execute_and_handle_permissions(target, model.contents, model, model.tmp_name())\n\n if final_drop_type is not None:\n self.drop(target, model, model.name, final_drop_type)\n\n self.rename(target, model)\n\n def execute_models(self, linker, models, limit_to=None):\n target = self.get_target()\n\n dependency_list = linker.as_dependency_list(limit_to)\n num_models = sum([len(node_list) for node_list in dependency_list])\n\n if num_models == 0:\n 
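# --- Illustrative aside, not part of the original file ---
# execute_models below walks the dependency graph one topological level at a time:
# models inside a level are independent, so a thread pool can map over the whole
# level, and a model whose ancestor failed or was skipped is skipped itself.
# A self-contained sketch of that scheduling idea (run_levels and run_one are
# hypothetical stand-ins for the real SQL execution):
from multiprocessing.dummy import Pool as ThreadPool

def run_levels(levels, run_one, threads=4):
    # levels: topologically grouped list of lists of (name, parents) pairs
    pool = ThreadPool(threads)
    failed, results = set(), {}
    for level in levels:
        for name, parents in level:
            if failed & set(parents):
                results[name] = "skipped"
                failed.add(name)  # propagate, so grandchildren are skipped too
        todo = [name for name, parents in level if name not in results]
        for name, ok in pool.map(lambda n: (n, run_one(n)), todo):
            results[name] = "ok" if ok else "error"
            if not ok:
                failed.add(name)
    pool.close()
    pool.join()
    return results

# "b" fails, so its child "d" is skipped while the independent "c" still runs:
levels = [[("a", [])], [("b", ["a"]), ("c", ["a"])], [("d", ["b"])]]
print(run_levels(levels, lambda name: name != "b"))
# -> {'a': 'ok', 'b': 'error', 'c': 'ok', 'd': 'skipped'}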
print(\"WARNING: No models to run in '{}'. Try checking your model configs and running `dbt compile`\".format(self.target_path))\n return []\n\n existing = self.query_for_existing(target, target.schema);\n\n def wrap_fqn(target, models, existing, fqn):\n model = self.get_model_by_fqn(models, fqn)\n\n # False, 'view', or 'table'\n tmp_drop_type = existing.get(model.tmp_name(), None) \n final_drop_type = existing.get(model.name, None)\n return {\"model\" : model, \"target\": target, \"tmp_drop_type\": tmp_drop_type, 'final_drop_type': final_drop_type}\n\n # we can only pass one arg to the self.execute_model method below. Pass a dict w/ all the data we need\n model_dependency_list = [[wrap_fqn(target, models, existing, fqn) for fqn in node_list] for node_list in dependency_list]\n\n num_threads = target.threads\n print(\"Concurrency: {} threads (target='{}')\".format(num_threads, self.project['run-target']))\n print(\"Running!\")\n\n pool = ThreadPool(num_threads)\n\n failed_models = set()\n\n model_results = []\n for model_list in model_dependency_list:\n failed_nodes = [tuple(model.fqn) for model in failed_models]\n\n models_to_execute = [data for data in model_list if not linker.is_child_of(failed_nodes, tuple(data['model'].fqn))]\n models_to_skip = [data for data in model_list if linker.is_child_of(failed_nodes, tuple(data['model'].fqn))]\n\n for i, data in enumerate(models_to_skip):\n model = data['model']\n model_result = RunModelResult(model, skip=True)\n model_results.append(model_result)\n print(\"{} of {} -- SKIP relation {}.{} because parent failed\".format(len(model_results), num_models, target.schema, model_result.model.name))\n\n run_model_results = pool.map(self.execute_wrapped_model, models_to_execute)\n\n for run_model_result in run_model_results:\n model_results.append(run_model_result)\n\n if run_model_result.errored:\n failed_models.add(run_model_result.model)\n print(\"{} of {} -- ERROR creating relation {}.{}\".format(len(model_results), num_models, target.schema, run_model_result.model.name))\n print(run_model_result.error)\n else:\n print(\"{} of {} -- OK Created relation {}.{}\".format(len(model_results), num_models, target.schema, run_model_result.model.name))\n\n pool.close()\n pool.join()\n\n return model_results\n\n def run(self, specified_models=None):\n linker = self.deserialize_graph()\n compiled_models = self.get_compiled_models()\n\n limit_to = None\n if specified_models is not None:\n limit_to = []\n for model_name in specified_models:\n try:\n model = find_model_by_name(compiled_models, model_name)\n limit_to.append(tuple(model.fqn))\n except RuntimeError as e:\n print(\"ERROR: {}\".format(str(e)))\n print(\"Exiting\")\n return[]\n\n target_cfg = self.project.run_environment()\n schema_name = target_cfg['schema']\n\n try:\n schemas = self.get_schemas()\n\n if schema_name not in schemas:\n self.create_schema_or_exit(schema_name)\n\n return self.execute_models(linker, compiled_models, limit_to)\n except psycopg2.OperationalError as e:\n print(\"ERROR: Could not connect to the target database. 
Try `dbt debug` for more information\")\n print(str(e))\n sys.exit(1)\n", "sub_path": "dbt/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 11839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 47, "usage_type": "call"}, {"api_name": "dbt.source.Source", "line_number": 53, "usage_type": "call"}, {"api_name": "dbt.targets.RedshiftTarget", "line_number": 57, "usage_type": "call"}, {"api_name": "dbt.compilation.Linker", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "psycopg2.ProgrammingError", "line_number": 91, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}, {"api_name": "psycopg2.ProgrammingError", "line_number": 157, "usage_type": "attribute"}, {"api_name": "psycopg2.ProgrammingError", "line_number": 165, "usage_type": "attribute"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 220, "usage_type": "call"}, {"api_name": "dbt.utils.find_model_by_name", "line_number": 263, "usage_type": "call"}, {"api_name": "psycopg2.OperationalError", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 283, "usage_type": "call"}]} +{"seq_id": "149608112", "text": "import urllib.request\nimport json\nimport numpy as np\nstr1 = \"http://api.tianapi.com/txapi/star/index?key=8228182d120df1024763546c91223d95&astro=aries\"\nresp = urllib.request.urlopen(str1)\ncontent = resp.read()\nt = content.decode()\nload_data = json.loads(t)\ndata = load_data.get(\"newslist\")\nresult1 = []\nfor i in data:\n result1.append(i.get(\"type\"))\n result1.append(i.get(\"content\"))\ndata2 = np.array(result1)\nprint(data)\nprint(data2)\n\n\n\n\n\n\n# {\"code\":200,\n# \"msg\":\"success\",\n# \"newslist\":\n# [\n# {\"type\":\"综合指数\",\"content\":\"60%\"},\n# {\"type\":\"爱情指数\",\"content\":\"60%\"},\n# {\"type\":\"工作指数\",\"content\":\"70%\"},\n# {\"type\":\"财运指数\",\"content\":\"50%\"},\n# {\"type\":\"健康指数\",\"content\":\"85%\"},\n# {\"type\":\"幸运颜色\",\"content\":\"青色\"},\n# {\"type\":\"幸运数字\",\"content\":\"2\"},\n# {\"type\":\"贵人星座\",\"content\":\"水瓶座\"},\n# {\"type\":\"今日概述\",\"content\":\"今天的你很有可能进行冲动支出,消费之前再三思考,切忌盲目跟风,适合自己的才是最好���。爱情方面进入平缓期,最近你们没有什么大的矛盾,可以尝试进行约会,巩固感情。\"}\n# ]\n# }\n", "sub_path": "BOT2021/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 5, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 5, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 5, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "598167569", "text": "'''\nDiciamo che un dizionario d rappresenta un albero (e lo indichiamo come dizionario-albero)\nse ciascuna chiave di d e' un identificativo di un nodo dell'albero e l'attributo della chiave e' la lista \n(eventualmente vuota) degli identificativi dei figli del nodo. 
The node identifiers \ninside these lists are in ascending lexicographic order.\n\n\n\nHere is an example of a dictionary d that represents a dictionary-tree\n\nd={\n'a':['b'],\n'b':['c','d'],\n'c':['i'],\n'd':['e','l'],\n'e':['f','g','h'],\n'f':[],\n'g':[],\n'h':[],\n'i':[],\n'l':[]\n}\n\nThe tree represented by d is\n\n          'a'\n           |\n _____________'b'____________ \n |                          | \n'c'              ________'d'_______ \n |               |                | \n'i'       _______'e'_______      'l'\n          |      |        | \n         'f'    'g'      'h'\n\n\nImplement the following functions:\n\n1) \nthe function genera_sottoalbero(fnome,x,fout) which, given:\n\n- the name of a json file containing a dictionary-tree d (fnome)\n- an identifier x\n- the name of a json file (fout)\n\nproduces the dictionary-tree representing the subtree rooted at \nidentifier x, obtained from the dictionary-tree d. \nThe resulting dictionary-tree must be saved in the file fout.\nIf the identifier x is not among the nodes of d, then the resulting dictionary-tree \nmust be empty.\n\nFor example, if fnome contains the dictionary-tree d, then after executing \ngenera_sottoalbero(fname,'d',fout)\nthe file fout will contain the dictionary\n{'f': [], 'g': [], 'h': [], 'e': ['f', 'g', 'h'], 'l': [], 'd': ['e', 'l']}\n\n\n\n2)\nthe function cancella_sottoalbero(fnome,x,fout) which, given:\n\n- the name of a json file containing a dictionary-tree d (fnome)\n- an identifier x\n- the name of a json file (fout)\n\nremoves from d the subtree rooted at x and saves the resulting dictionary-tree in the file fout.\nIf x is not among the keys of d, then the dictionary-tree d is left unchanged.\n\nFor example, if fnome contains the dictionary-tree d, then after executing \ncancella_sottoalbero(fname,'d',fout)\nthe file fout will contain the dictionary\n{'a': ['b'], 'b': ['c'], 'c': ['i'], 'i':[]}\n\n\n3)\nthe function dizionario_livelli(fnome, fout) which, given:\n- the name of a json file containing a dictionary-tree d (fnome)\n- the name of a json file (fout)\n\nbuilds the dictionary whose keys are the levels of the dictionary-tree d. The attribute of a \nkey with value x is the list of identifiers of the nodes found at level x in the tree represented by d. \nThe list is ordered lexicographically in ascending order (a compact breadth-first sketch of this computation follows below). 
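An aside from the editor, not part of the original assignment text: the level dictionary described in point 3 is a plain breadth-first walk over the dictionary-tree. A minimal, self-contained sketch — assuming, as in the examples, that the root is the only key that never appears inside a child list (livelli is a hypothetical name, distinct from the solution code further down):

def livelli(d):
    # hypothetical helper: map each level number to the sorted identifiers on it
    children = {c for vals in d.values() for c in vals}
    frontier = [k for k in d if k not in children]  # the root
    out, lev = {}, 0
    while frontier:
        out[lev] = sorted(frontier)
        frontier = [c for n in frontier for c in d[n]]
        lev += 1
    return out

d = {'a': ['b'], 'b': ['c', 'd'], 'c': ['i'], 'd': ['e', 'l'],
     'e': ['f', 'g', 'h'], 'f': [], 'g': [], 'h': [], 'i': [], 'l': []}
print(livelli(d))  # {0: ['a'], 1: ['b'], 2: ['c', 'd'], 3: ['e', 'i', 'l'], 4: ['f', 'g', 'h']}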
\nIl dizionario cosi' costruito va registrato nel file fout.\n\nAd esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di \ndizionario_livelli(fname,fout)\nil file fout conterra' il dizionario\n{0: ['a'], 1: ['b'], 2: ['c', 'd'], 3: ['e','i','l'], 4: ['f', 'g', 'h']}\n\n4)\nla funzione dizionario_gradi_antenati(fnome,y,fout) che, presi:\n- il nome di un file json contenente un dizionario-albero d (fonome)\n- un intero y\n- il nome di un file json (fout)\n\ncostuisce il dizionario che ha come chiavi gli identificativi dei nodi dell'albero \nrappresentato dal dizionario-albero d, Attributo di una chiave di valore x e' il numero \ndi antenati di grado y che ha il nodo con identificativo x nell'albero.\nRegistra il dizionario costruito nel file fout.\n\nAd esempio se fnome contiene il dizionario-albero d allora dopo l'esecuzione di \ndizionario_gradi_antenati(fnome,2,fout)\nil file fout conterra' il dizionario \n{'a': 0, 'b': 0, 'c': 1, 'd': 1, 'e': 2, 'f': 2, 'g': 2, 'h': 2, 'i': 1, 'l': 2}\n\nAVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non\nimportare moduli che non sono nella libreria standard.\n'''\n\n\n\n\nimport json\nfrom copy import deepcopy\n\ndef genera_sottoalbero(fnome,x,fout):\n '''inserire qui il vostro codice'''\n with open(fnome) as json_data:\n dict_old = json.load(json_data)\n dizionario_nuovo={}\n dizionario_nuovo[x]=dict_old[x]\n lista_livelli=dict_old[x]\n dizionario_nuovo=dizionario01(dict_old,x,lista_livelli,dizionario_nuovo)\n with open(fout,'w') as j:\n json.dump(dizionario_nuovo,j)\n\ndef dizionario01(dict_old,x,lista_livelli,dizionario_nuovo):\n dizionario_nuovo1=dizionario_nuovo\n lista_livelli01=[]\n k=0\n for valore_della_lista in lista_livelli:\n if dict_old[valore_della_lista]==[]:\n dizionario_nuovo[valore_della_lista]=dict_old[valore_della_lista]\n else:\n dizionario_nuovo[valore_della_lista]=dict_old[valore_della_lista]\n for y in dict_old[valore_della_lista]:\n lista_livelli01+=[y]\n k+=1\n lista_livelli=lista_livelli01\n if lista_livelli==[]:\n return dizionario_nuovo\n return dizionario01(dict_old,x,lista_livelli,dizionario_nuovo)\n\n\n\n\ndef cancella_sottoalbero(fnome,x,fout):\n '''inserire qui il vostro codice'''\n with open(fnome) as json_data:\n dict_old = json.load(json_data)\n albero_da_cancellare=cancella_sottoalbero01(x,dict_old)\n valori_da_cancellare=albero_da_cancellare.keys()\n for da_cancellare in valori_da_cancellare:\n del dict_old[da_cancellare]\n valori_da_cercare=dict_old.keys()\n for valore in valori_da_cercare:\n for contenuto_dict_old in dict_old[valore]:\n #print('for2',contenuto_dict_old)\n if contenuto_dict_old==x:\n #print(valore)\n valore_identificato=valore\n break\n lista_contenuto=[]\n for contenuto in dict_old[valore_identificato]:\n if contenuto!=x:\n lista_contenuto+=[contenuto]\n #print(lista_contenuto)\n dict_old[valore_identificato]=lista_contenuto\n #print(dict_old)\n with open(fout,'w') as j:\n json.dump(dict_old,j)\n\ndef cancella_sottoalbero01(x,dict_old):\n '''inserire qui il vostro codice'''\n dizionario_nuovo={}\n dizionario_nuovo[x]=dict_old[x]\n lista_livelli=dict_old[x]\n return cancella_sottoalbero02(dict_old,x,lista_livelli,dizionario_nuovo)\n \n\ndef cancella_sottoalbero02(dict_old,x,lista_livelli,dizionario_nuovo):\n dizionario_nuovo1=dizionario_nuovo\n lista_livelli01=[]\n k=0\n for valore_della_lista in lista_livelli:\n if dict_old[valore_della_lista]==[]:\n dizionario_nuovo[valore_della_lista]=dict_old[valore_della_lista]\n else:\n 
dizionario_nuovo[valore_della_lista]=dict_old[valore_della_lista]\n for y in dict_old[valore_della_lista]:\n lista_livelli01+=[y]\n k+=1\n lista_livelli=lista_livelli01\n if lista_livelli==[]:\n return dizionario_nuovo\n return cancella_sottoalbero02(dict_old,x,lista_livelli,dizionario_nuovo)\n\n\n\n\ndef dizionario_livelli(fnome,fout):\n '''inserire qui il vostro codice'''\n with open(fnome) as json_data:\n dict_old = json.load(json_data)\n x=trova_primo_elemento(dict_old) #x=valore_iniziale\n dizionario_livelli={}\n k=0\n dizionario_livelli[k]=[x]\n lista1=dict_old[x]\n coppia_risultato=struttura_livelli(x,dict_old,dizionario_livelli,k,lista1)\n dizionario_livelli=coppia_risultato[0]\n k=coppia_risultato[1]\n k1=0\n dizionario_livelli=risistematina(dizionario_livelli,k,k1)\n with open(fout,'w') as j:\n json.dump(dizionario_livelli,j)\n\ndef trova_primo_elemento(dict_old):\n chiavi_da_cercare=dict_old.keys()\n valori_da_cercare=dict_old.values()\n lista_valori=[]\n for valore in valori_da_cercare:\n for zio in valore:\n lista_valori+=valore\n break\n for valore in chiavi_da_cercare:\n if valore not in lista_valori:\n valore_iniziale=valore\n break\n return valore_iniziale\n\ndef struttura_livelli(x,dict_old,dizionario_livelli,k,lista1):\n lista=lista1\n lista1=[]\n k+=1\n dizionario_livelli[k]=[]\n for valore in lista:\n if dizionario_livelli[k]==[]:#se dobbiamo ancora attribuirgli un valore:\n if type(valore)==type('s'):\n if valore!=[]:\n lista1+=dict_old[valore]\n dizionario_livelli[k]=lista\n elif type(valore)==type([]):\n dizionario_livelli[k]=[]\n for elemento in valore:\n if elemento!=[]:\n lista1+=dict_old[elemento]\n dizionario_livelli[k]+=[elemento]\n else:\n lista_valute=dizionario_livelli[k]\n if type(valore)==type('s'):\n if valore!=[]:\n lista1+=dict_old[valore]\n dizionario_livelli[k]=lista\n elif type(valore)==type([]):\n for elemento in valore:\n if elemento!=[]:\n lista1+=dict_old[elemento]\n dizionario_livelli[k]+=elemento\n risultato=(dizionario_livelli,k)\n if lista1!=[]:\n return struttura_livelli(x,dict_old,dizionario_livelli,k,lista1)\n return risultato\n\ndef risistematina(dizionario_livelli,k,k1):\n for attributo in dizionario_livelli:\n if type(attributo)!=type('s'):\n dizionario_livelli[k1]=sorted(dizionario_livelli[k1])\n stringa_k1=str(k1)\n dizionario_livelli[stringa_k1]=sorted(dizionario_livelli[k1])\n del dizionario_livelli[k1]\n k1+=1\n if k+1==k1:\n return dizionario_livelli\n return risistematina(dizionario_livelli,k,k1)\n \n\ndef dizionario_gradi_antenati(fnome,y,fout):\n #y=numero di grado\n #x=numero di antenati di grado y\n '''inserire qui il vostro codice'''\n with open(fnome) as json_data:\n dict_old = json.load(json_data)\n originale_dict_old=deepcopy(dict_old)\n dizionario_figli={}\n dizionario_antenati={}\n dizionariolivelli=dizionario_livelli1(fnome)\n dizionario_figli=genera_figli(dict_old,dizionario_figli)\n x=trova_primo_elemento1(originale_dict_old)\n dizionario_antenati[x]=0\n dict_old=originale_dict_old\n chiavi=[]\n chiavi1=dict_old.keys()\n for h in chiavi1:\n chiavi+=[h]\n chiavi_complete=deepcopy(chiavi)\n chiavi=chiavi[1:]\n dizionario_antenati=analizza_dati(dict_old,dizionario_figli,dizionario_antenati,y,chiavi,x,dizionariolivelli)\n #print('FINALE DIZIONARIO_ANTENATI',dizionario_antenati)\n with open(fout,'w') as j:\n json.dump(dizionario_antenati,j)\n\ndef analizza_dati(dict_old,dizionario_figli,dizionario_antenati,y,chiavi,x,dizionariolivelli):\n percorsoalbero={}\n items=[]\n items1=dict_old.items()\n for x in items1:\n 
items+=[x]\n i=0\n #print('CICLO',i)\n #il ciclo for posso metterlo in una funzione ricorsiva esterna che funziona grazie alla rimozione del singolo elemento nella lista delle chiavi\n for valore in chiavi:\n #print('VALORE ANALIZZA_DATI',valore)\n valore_vero=deepcopy(valore)\n percorso_albero=[]\n percorsoalbero=percorso(dizionariolivelli,dict_old,percorsoalbero,items,chiavi,valore,x,percorso_albero)\n #print('VALORE ANALIZZA_DATI DOPO PERCORSOALBERO',valore)\n #print('PERCORSOALBERO DOPO SUA CREAZIONE',percorsoalbero)\n #print('PERCORSOALBERO[VALORE]=',percorsoalbero[valore])\n percorsoalbero2={}\n percorsoalbero2[valore]=percorsoalbero[valore]\n #print('PERCORSOALBERO2',percorsoalbero2)\n #percorsoalbero2=percorso_2(valore,percorsoalbero,x,percorsoalbero2)\n dizionario_antenati=assegnazione(dizionario_figli,dizionario_antenati,percorsoalbero,y,valore)\n #print('PERCORSOALBERO2',percorsoalbero2)\n #print('CICLO',i)\n i+=1\n #print(dizionario_antenati)\n return dizionario_antenati\n\ndef assegnazione(dizionario_figli,dizionario_antenati,percorsoalbero,y,valore):\n presenze_grado=0\n for chiave_percorso in percorsoalbero[valore]:\n if dizionario_figli[chiave_percorso]==y:\n presenze_grado+=1\n dizionario_antenati[valore]=presenze_grado\n return dizionario_antenati\n\ndef percorso(dizionariolivelli,dict_old,percorsoalbero,items,chiavi,valore,x,percorso_albero):\n chiavi2=deepcopy(chiavi)\n chiavi=deepcopy(chiavi)\n #print('INIZIO',chiavi,valore)\n valore_iniziale=deepcopy(valore)\n livelli=dizionariolivelli.items()\n percorso_albero=[]\n dict_livelli=[]\n for x in livelli:\n dict_livelli+=[x]\n for k1 in range(len(items)):\n for k in range(len(chiavi2)):\n if valore in items[k][1]:\n percorso_albero+=[items[k][0]]\n break\n valore=items[k][0]\n #print('1-=',percorso_albero)\n #mi crea e mi dice il valore \n for k2 in range(len(livelli)):\n if valore_iniziale in dict_livelli[k2][1]:\n soluzione=dict_livelli[k2][0]\n break\n #print('DIZIONARIO PERCORSO ALBERO',percorsoalbero)\n if int(soluzione)==len(percorso_albero):\n break\n #'''\n #print('VALORE',valore,'PERCORSO ALBERO',percorso_albero)\n percorsoalbero[valore_iniziale]=percorso_albero\n #print('2-=',percorso_albero)\n return percorsoalbero\n \n\ndef genera_figli(dict_old,dizionario_figli):\n chiavi=dict_old.keys()\n chiavi1=[]\n for x in chiavi:\n chiavi1+=[x]\n chiave=chiavi1[0]\n if type(dict_old[chiave])==type('s'):\n dizionario_figli[chiave]=1\n del dict_old[chiave]\n else:\n dizionario_figli[chiave]=len(dict_old[chiave])\n del dict_old[chiave]\n if dict_old=={}:\n return dizionario_figli\n return genera_figli(dict_old,dizionario_figli)\n\ndef dizionario_livelli1(fnome):\n '''inserire qui il vostro codice'''\n with open(fnome) as json_data:\n dict_old = json.load(json_data)\n x=trova_primo_elemento(dict_old) #x=valore_iniziale\n dizionario_livelli={}\n k=0\n dizionario_livelli[k]=[x]\n lista1=dict_old[x]\n coppia_risultato=struttura_livelli1(x,dict_old,dizionario_livelli,k,lista1)\n dizionario_livelli=coppia_risultato[0]\n k=coppia_risultato[1]\n k1=0\n dizionario_livelli=risistematina1(dizionario_livelli,k,k1)\n return dizionario_livelli\n\ndef trova_primo_elemento1(dict_old):\n chiavi_da_cercare=dict_old.keys()\n valori_da_cercare=dict_old.values()\n lista_valori=[]\n for valore in valori_da_cercare:\n for zio in valore:\n lista_valori+=[valore]\n break\n for valore in chiavi_da_cercare:\n if valore not in lista_valori:\n valore_iniziale=valore\n break\n return valore_iniziale\n\ndef 
struttura_livelli1(x,dict_old,dizionario_livelli,k,lista1):\n lista=lista1\n lista1=[]\n k+=1\n dizionario_livelli[k]=[]\n for valore in lista:\n if dizionario_livelli[k]==[]:#se dobbiamo ancora attribuirgli un valore:\n if type(valore)==type('s'):\n if valore!=[]:\n lista1+=dict_old[valore]\n dizionario_livelli[k]=lista\n elif type(valore)==type([]):\n dizionario_livelli[k]=[]\n for elemento in valore:\n if elemento!=[]:\n lista1+=dict_old[elemento]\n dizionario_livelli[k]+=[elemento]\n else:\n lista_valute=dizionario_livelli[k]\n if type(valore)==type('s'):\n if valore!=[]:\n lista1+=dict_old[valore]\n dizionario_livelli[k]=lista\n elif type(valore)==type([]):\n for elemento in valore:\n if elemento!=[]:\n lista1+=dict_old[elemento]\n dizionario_livelli[k]+=elemento\n risultato=(dizionario_livelli,k)\n if lista1!=[]:\n return struttura_livelli(x,dict_old,dizionario_livelli,k,lista1)\n return risultato\n\ndef risistematina1(dizionario_livelli,k,k1):\n for attributo in dizionario_livelli:\n if type(attributo)!=type('s'):\n dizionario_livelli[k1]=sorted(dizionario_livelli[k1])\n stringa_k1=str(k1)\n dizionario_livelli[stringa_k1]=sorted(dizionario_livelli[k1])\n del dizionario_livelli[k1]\n k1+=1\n if k+1==k1:\n return dizionario_livelli\n return risistematina(dizionario_livelli,k,k1)\n \n", "sub_path": "students/1811290/homework04/program01.py", "file_name": "program01.py", "file_ext": "py", "file_size_in_byte": 16333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.load", "line_number": 119, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 125, "usage_type": "call"}, {"api_name": "json.load", "line_number": 150, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 171, "usage_type": "call"}, {"api_name": "json.load", "line_number": 204, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 216, "usage_type": "call"}, {"api_name": "json.load", "line_number": 283, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 284, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 296, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 301, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 314, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 340, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 341, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 343, "usage_type": "call"}, {"api_name": "json.load", "line_number": 390, "usage_type": "call"}]} +{"seq_id": "561626260", "text": "# This is the entry point for the GCMLE and it will talk to your code through task.py\n# Courtesy of GCMLE tutorial\n\n\nimport argparse\n\n\nfrom . 
import model # python3.x\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n\n # Input Arguments\n parser.add_argument(\n '--train_data_paths',\n help = 'GCS or local path to training data',\n required = True\n )\n parser.add_argument(\n '--train_batch_size',\n help = 'Batch size for training steps',\n type = int,\n default = 512\n )\n parser.add_argument(\n '--train_steps',\n help = 'Steps to run the training job for',\n type = int,\n required=True\n\n )\n parser.add_argument(\n '--eval_steps',\n help = 'Number of steps to run evalution for at each checkpoint',\n default = 10,\n type = int,\n required=True\n )\n parser.add_argument(\n '--eval_data_paths',\n help = 'GCS or local path to evaluation data',\n required = True\n )\n\n # Training arguments\n parser.add_argument(\n '--embedding_vocab_sizes',\n help='as a list [country_vocab_size, sourceGameId_vocab_size, targetGameId_vocab_size, campaignId_vocab_size]',\n type=int,\n nargs='+',\n required=True,\n )\n parser.add_argument(\n '--embedding_dimentions',\n help=\"\"\"as a list [country_emb_size, sourceGameId_emb_size, targetGameId_emb_size, campaignId_emb_size]. If not\n provided, it will be cacluated base on the provided vocabualary size\"\"\",\n type=int,\n nargs='+',\n required=False,\n )\n parser.add_argument(\n '--output_dir',\n help = 'GCS location to write checkpoints and export models',\n required = True\n )\n parser.add_argument(\n '--job-dir',\n help = 'this model ignores this field, but it is required by gcloud',\n default = 'junk',\n required=False\n )\n\n # Eval arguments\n parser.add_argument(\n '--eval_delay_secs',\n help = 'How long to wait before running first evaluation',\n default = 10,\n type = int,\n required=True\n )\n parser.add_argument(\n '--min_eval_frequency',\n help = 'Seconds between evaluations',\n default = 300,\n type = int,\n required=True\n )\n parser.add_argument(\n '--save_check_point_steps',\n help='check points to be saved every these number of steps',\n required=False,\n type=int\n )\n parser.add_argument(\n '--save_summary_steps',\n help='Every these summary steps, summaries will be saved',\n type=int\n )\n parser.add_argument(\n '--epoch_number',\n type=int,\n help='Epoch number, if provided, training steps will be ignored',\n required=False\n )\n parser.add_argument(\n '--training_data_size',\n type=int,\n help='size of the training set',\n required=False\n )\n args = parser.parse_args()\n arguments = args.__dict__\n\n if arguments.get('epoch_number', -1):\n print(\"epoch_number provided, the following values will be ignored \"\n \"save_check_point_steps, save_summary_steps and will be readjusted based on the epoch number\"\n )\n readjusted_training_steps =\\\n (arguments['epoch_number'] * arguments['training_data_size'])/ arguments['train_batch_size']\n\n #TODO take the validation frequency, checkpoint frequency, summary frequency through arguments\n # save checkpoint every 0.25 epoch\n readjusted_save_check_point_steps = \\\n (0.25 *arguments['training_data_size']) // (arguments['train_batch_size'])\n\n print('readjusted training steps {} , readjusted_save_check_point_steps {}'\\\n .format(readjusted_training_steps, readjusted_save_check_point_steps))\n\n arguments['train_steps'] = readjusted_training_steps\n arguments['save_check_point_steps'] = readjusted_save_check_point_steps\n\n # Unused args provided by service\n arguments.pop('job_dir', None)\n arguments.pop('job-dir', None)\n\n output_dir = arguments['output_dir']\n # Append trial_id to path if we are doing 
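# --- Illustrative aside, not part of the original file ---
# The epoch-to-steps readjustment above is plain arithmetic: one epoch is
# data_size / batch_size steps. With hypothetical numbers (1,000,000 examples,
# batch size 512, 4 epochs):
#   train_steps            = 4 * 1_000_000 / 512        -> 7812.5
#   save_check_point_steps = (0.25 * 1_000_000) // 512  -> 488.0 (one checkpoint per quarter epoch)
def steps_for(epochs, data_size, batch_size):  # hypothetical helper name
    return epochs * data_size / batch_size

assert steps_for(4, 1_000_000, 512) == 7812.5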
hptuning\n # This code can be removed if you are not using hyperparameter tuning\n \"\"\"\n output_dir = os.path.join(\n output_dir,\n json.loads(\n os.environ.get('TF_CONFIG', '{}')\n ).get('task', {}).get('trail', '')\n )\n \"\"\"\n # Run the training job\n model.train_and_evaluate(arguments)\n\n", "sub_path": "trainer/task.py", "file_name": "task.py", "file_ext": "py", "file_size_in_byte": 4556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "610885834", "text": "import sys\nfrom collections import deque\n \ninput = sys.stdin.readline\n \nn, m, k, x = map(int, input().split())\nvisited = [False] * (n + 1)\n \npath = [[] for _ in range(n + 1)]\n\nfor _ in range(m):\n a, b = map(int, input().split())\n path[a].append(b)\n\nprint(path)\n \nanswer = list()\nqueue = deque()\nqueue.append((x, 0))\n\nwhile queue:\n town, count = queue.popleft()\n if count == k:\n answer.append(town)\n elif count < k:\n for con in path[town]:\n if not visited[con]:\n visited[con] = True\n queue.append((con, count + 1))\n \nif len(answer) == 0:\n print(-1)\nelse:\n answer.sort()\n for ans in answer:\n print(ans)", "sub_path": "Algorithm/boj/최단거리/18352 특정 거리의 도시 찾기.py", "file_name": "18352 특정 거리의 도시 찾기.py", "file_ext": "py", "file_size_in_byte": 686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "407489873", "text": "#!/usr/bin/python3\n\nimport networkx\nimport collections\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport operator\nimport copy\n\n\nclass ConstraintViolation(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n \"\"\"docstring for __str__\"\"\"\n return repr(self.value)\n\n\nclass BayesianNetwork(networkx.DiGraph):\n def __init__(self, data=None, **attr):\n super().__init__(data=None, **attr)\n\n def add_node(self, n, attr_dict=None, **attr):\n \"\"\"docstring for add_node\"\"\"\n super().add_node(n, attr_dict=None, **attr)\n\n def add_edge(self, u, v, attr_dict=None, **attr):\n super().add_edge(u, v, attr_dict=None, **attr)\n if not networkx.is_directed_acyclic_graph(self):\n super().remove_edge(u, v)\n raise ConstraintViolation('Edge', str(u), str(v), 'violates DAG constraint, possibly creates a cycle')\n\n def model_probability(self, data):\n pass\n\n def BDeu_prior(self, alpha):\n for nodedata in self.node.values():\n nodedata['values'] = np.ones(nodedata['values'].shape) * alpha / nodedata['values'].size\n\n def data_probability(self, data):\n total = 0\n counts = copy.deepcopy(prior)\n result = []\n for vector in data:\n for idx, value in enumerate(vector):\n counts[idx][value - 1] += 1\n for c, t in zip(counts, total):\n result.append(list(map(operator.truediv, c, t)))\n return result\n\n\ndef construct_nodes(variables):\n graph = BayesianNetwork()\n for node_name, vals in variables.items():\n graph.add_node(node_name, values=np.ones(vals.shape))\n return graph\n\n\ndef load_data():\n data = np.genfromtxt('training_data.txt', skip_header=1, dtype=int)\n f = open('training_data.txt')\n names = f.readline().strip().split(' ')\n f.close()\n variables = {}\n for idx, name in enumerate(names):\n variables[name] = np.unique(data.T[idx])\n return variables, data\n\nif __name__ 
== '__main__':\n variable_names, data = load_data()\n g = construct_nodes(variable_names, data)\n networkx.draw_circular(g)\n plt.savefig('test.png')\n", "sub_path": "structure_learning.py", "file_name": "structure_learning.py", "file_ext": "py", "file_size_in_byte": 2183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "networkx.DiGraph", "line_number": 20, "usage_type": "attribute"}, {"api_name": "networkx.is_directed_acyclic_graph", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 43, "usage_type": "call"}, {"api_name": "operator.truediv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 67, "usage_type": "call"}, {"api_name": "networkx.draw_circular", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "1175939", "text": "from discord.ext import commands\n\nfrom cogs import utils\n\n\nclass Misc(utils.Cog):\n\n @commands.command(aliases=['git', 'code'], cls=utils.Command)\n @utils.checks.is_config_set('command_data', 'github')\n async def github(self, ctx:utils.Context):\n \"\"\"Sends the GitHub Repository link\"\"\"\n\n await ctx.send(f\"<{self.bot.config['command_data']['github']}>\")\n\n @commands.command(aliases=['support', 'guild'], cls=utils.Command)\n @utils.checks.is_config_set('command_data', 'guild_invite')\n async def server(self, ctx:utils.Context):\n \"\"\"Gives the invite to the support server\"\"\"\n\n await ctx.send(f\"<{self.bot.config['command_data']['guild_invite']}>\")\n\n @commands.command(aliases=['patreon'], cls=utils.Command)\n @utils.checks.is_config_set('command_data', 'patreon')\n async def donate(self, ctx:utils.Context):\n \"\"\"Gives you the bot's creator's Patreon\"\"\"\n\n await ctx.send(f\"<{self.bot.config['command_data']['patreon']}>\")\n\n @commands.command(cls=utils.Command)\n async def invite(self, ctx:utils.Context):\n \"\"\"Gives you the bot's invite link\"\"\"\n\n await ctx.send(f\"<{self.bot.get_invite_link()}>\")\n\n @commands.command(cls=utils.Command)\n async def echo(self, ctx:utils.Context, *, content:utils.converters.CleanContent):\n \"\"\"Echos the given content into the channel\"\"\"\n\n await ctx.send(content)\n\n\ndef setup(bot:utils.Bot):\n x = Misc(bot)\n bot.add_cog(x)\n", "sub_path": "cogs/misc.py", "file_name": "misc.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cogs.utils.Cog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 6, "usage_type": "name"}, {"api_name": "cogs.utils.Context", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 10, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 8, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name"}, {"api_name": "cogs.utils.Command", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 8, "usage_type": "name"}, {"api_name": 
"cogs.utils.checks.is_config_set", "line_number": 9, "usage_type": "call"}, {"api_name": "cogs.utils.checks", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 9, "usage_type": "name"}, {"api_name": "cogs.utils.Context", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 17, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "cogs.utils.Command", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 15, "usage_type": "name"}, {"api_name": "cogs.utils.checks.is_config_set", "line_number": 16, "usage_type": "call"}, {"api_name": "cogs.utils.checks", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 16, "usage_type": "name"}, {"api_name": "cogs.utils.Context", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 22, "usage_type": "name"}, {"api_name": "cogs.utils.Command", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 22, "usage_type": "name"}, {"api_name": "cogs.utils.checks.is_config_set", "line_number": 23, "usage_type": "call"}, {"api_name": "cogs.utils.checks", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "cogs.utils.Context", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 29, "usage_type": "name"}, {"api_name": "cogs.utils.Command", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 29, "usage_type": "name"}, {"api_name": "cogs.utils.Context", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 36, "usage_type": "name"}, {"api_name": "cogs.utils.converters", "line_number": 36, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 35, "usage_type": "name"}, {"api_name": "cogs.utils.Command", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 35, "usage_type": "name"}, {"api_name": "cogs.utils.Bot", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cogs.utils", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "248359955", "text": "import crepe\nfrom scipy.io import wavfile\nimport matplotlib.pyplot as plt\nimport librosa\nfrom filters import *\nfrom create_base import *\n\n\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏8_40434(30).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/视唱1-01(95).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/视唱1-02(90).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律1.1(95).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律2(四)(96).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律2卢(98).wav'\nfilename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律1.1(95).wav'\n#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律2.1(80).wav'\n#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律2.3(55).wav'\nfilename = 
'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律二(10)(75).wav'\n#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/旋律/旋律二(8)(100).wav'\n\n\nsr, audio = wavfile.read(filename)\naudio, sr = load_and_trim(filename)\ntime = librosa.get_duration(audio)\nprint(\"time is {}\".format(time))\ntime, frequency, confidence, activation = crepe.predict(audio, sr,model_capacity='full', step_size=10, viterbi=True)\nstep = 3\nlen = len(frequency)\nfrequency = ArithmeticAverage(frequency.copy(), step)\n#y2 = ArithmeticAverage(y.copy(),step)\n#y2 = MedianAverage(y.copy(),step)\n\nfrequency = expand_output(frequency,step,len)\nfrequency = get_nearly_note(frequency,step)\nplt.axhline(y=262,color='r',linestyle='dashed')\nplt.axhline(y=294,color='r',linestyle='dashed')\nplt.axhline(y=330,color='r',linestyle='dashed')\nplt.axhline(y=349,color='r',linestyle='dashed')\nplt.axhline(y=392,color='r',linestyle='dashed')\nplt.axhline(y=440,color='r',linestyle='dashed')\nplt.axhline(y=494,color='r',linestyle='dashed')\nplt.plot(time,frequency)\nplt.ylim(200,500)\n\nplt.show()\nprint(frequency)\n", "sub_path": "raw_feature/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scipy.io.wavfile.read", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 22, "usage_type": "name"}, {"api_name": "librosa.get_duration", "line_number": 24, "usage_type": "call"}, {"api_name": "crepe.predict", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "627600825", "text": "#!/usr/bin/python3\n\nimport os\nfrom http.cookiejar import Cookie\n\nprint(\"Content-type: text-html\")\nprint()\n\nprint(\"\"\"\n\n\n \n 菜鸟教程(runoob.com)\n\n\n
读取cookie信息\n\"\"\")\nprint(os.environ)\nif 'HTTP_COOKIE' in os.environ:\n    print('HTTP_COOKIE')\n    cookie_string = os.environ.get('HTTP_COOKIE')\n    print(cookie_string)\n    c = Cookie.SimpleCookie()\n    c.load(cookie_string)\n\n    try:\n        data = c['name'].value\n        print(\"cookie data: \"+data+\"\")\n    except KeyError:\n        print(\"cookie 没有设置或者已过去
\")\n\nprint(\"\"\"\n\n\n\"\"\")\n", "sub_path": "cgi/cookie_get.py", "file_name": "cookie_get.py", "file_ext": "py", "file_size_in_byte": 653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "http.cookiejar.Cookie.SimpleCookie", "line_number": 23, "usage_type": "call"}, {"api_name": "http.cookiejar.Cookie", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "126960468", "text": "#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom uuid import UUID\n\nfrom polyaxon.polyflow import V1RunKind\n\n\ndef get_fxt_service():\n return {\n \"version\": 1.1,\n \"kind\": \"operation\",\n \"name\": \"foo\",\n \"description\": \"a description\",\n \"tags\": [\"tag1\", \"tag2\"],\n \"trigger\": \"all_succeeded\",\n \"component\": {\n \"name\": \"service-template\",\n \"tags\": [\"backend\", \"lab\"],\n \"run\": {\n \"kind\": V1RunKind.SERVICE,\n \"container\": {\"image\": \"jupyter\"},\n \"init\": [{\"connection\": \"foo\", \"git\": {\"revision\": \"dev\"}}],\n \"ports\": [5555],\n },\n },\n }\n\n\ndef get_fxt_service_with_inputs():\n return {\n \"version\": 1.1,\n \"kind\": \"operation\",\n \"name\": \"foo\",\n \"description\": \"a description\",\n \"params\": {\"image\": {\"value\": \"foo/bar\"}},\n \"component\": {\n \"name\": \"service-template\",\n \"inputs\": [{\"name\": \"image\", \"type\": \"str\"}],\n \"tags\": [\"backend\", \"lab\"],\n \"run\": {\n \"kind\": V1RunKind.SERVICE,\n \"container\": {\"image\": \"{{ image }}\"},\n \"init\": [{\"connection\": \"foo\", \"git\": {\"revision\": \"dev\"}}],\n \"ports\": [5555],\n },\n },\n }\n\n\ndef get_fxt_service_with_upstream_runs(run_uuid: UUID):\n return {\n \"version\": 1.1,\n \"kind\": \"operation\",\n \"name\": \"foo\",\n \"description\": \"a description\",\n \"params\": {\n \"image\": {\n \"value\": \"outputs.image-out\",\n \"ref\": \"runs.{}\".format(run_uuid.hex),\n }\n },\n \"component\": {\n \"name\": \"service-template\",\n \"inputs\": [{\"name\": \"image\", \"type\": \"str\"}],\n \"tags\": [\"backend\", \"lab\"],\n \"run\": {\n \"kind\": V1RunKind.SERVICE,\n \"container\": {\"image\": \"{{ image }}\"},\n \"init\": [{\"connection\": \"foo\", \"git\": {\"revision\": \"dev\"}}],\n \"ports\": [5555],\n },\n },\n }\n\n\ndef get_fxt_job_with_hub_ref():\n return {\n \"version\": 1.1,\n \"kind\": \"operation\",\n \"name\": \"foo\",\n \"description\": \"a description\",\n \"params\": {\"image\": {\"value\": \"foo/bar\"}},\n \"hubRef\": \"notebook\",\n }\n", "sub_path": "platform/polycommon/polycommon/test_cases/fixtures/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 2919, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "polyaxon.polyflow.V1RunKind.SERVICE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "polyaxon.polyflow.V1RunKind", "line_number": 34, "usage_type": "name"}, {"api_name": "polyaxon.polyflow.V1RunKind.SERVICE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "polyaxon.polyflow.V1RunKind", "line_number": 55, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 64, "usage_type": "name"}, {"api_name": "polyaxon.polyflow.V1RunKind.SERVICE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "polyaxon.polyflow.V1RunKind", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "215516557", "text": "#!/usr/bin/env python3\n\nimport json\nimport os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom statistics import mean, median\nfrom typing import Any, Dict\n\n\ndef parse_arguments():\n \"\"\"parses the arguments of the program for the data directory\"\"\"\n usage = \"python %(prog)s \"\n parser = ArgumentParser(usage=usage)\n parser.add_argument(\n \"data_dir\",\n type=str,\n action=\"store\",\n help=\"directory containing potential subdirectories with \"\n \"the json files with performance data\",\n )\n return parser.parse_args()\n\n\ndef get_statistics_from_data(data_dict: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n collects basic statistics from the raw data and writes them into the summary file\n \"\"\"\n\n median_points = []\n mean_points = []\n for rankdata in data_dict[\"times\"]:\n median_points.append(median(rankdata))\n mean_points.append(mean(rankdata))\n\n returnvalue: Dict[str, Any] = {\"medians\": {}, \"means\": {}}\n returnvalue[\"hits\"] = data_dict[\"hits\"]\n\n returnvalue[\"medians\"][\"mean_of_medians\"] = mean(median_points)\n returnvalue[\"medians\"][\"max_of_medians\"] = max(median_points)\n returnvalue[\"medians\"][\"min_of_medians\"] = min(median_points)\n returnvalue[\"means\"][\"mean_of_means\"] = mean(mean_points)\n returnvalue[\"means\"][\"80th percentile of means\"] = np.percentile(mean_points, 80)\n returnvalue[\"means\"][\"20th percentile of means\"] = np.percentile(mean_points, 20)\n\n return returnvalue\n\n\ndef write_summary_file(fullpath: str, summary_data: Dict[str, Any]):\n \"\"\"writes the summary file to the given path\"\"\"\n with open(str(Path(fullpath).with_suffix(\"\")) + \"_summary.json\", \"w\") as f:\n json.dump(summary_data, f, sort_keys=True, indent=4)\n\n\ndef analyze_file_at_path(fullpath: str) -> None:\n \"\"\"analyzes the file at a given path and writes a summary file\"\"\"\n with open(fullpath) as f:\n data = json.load(f)\n summary_data: Dict[str, Any] = {\"times\": {}, \"setup\": {}}\n summary_data[\"setup\"].update(data[\"setup\"])\n for data_set, times in data[\"times\"].items():\n summary_data[\"times\"][data_set] = get_statistics_from_data(times)\n\n write_summary_file(fullpath, summary_data)\n\n\ndef is_valid_file(fullpath: str):\n \"\"\"checks if the file needs to be summarized\"\"\"\n return (\n fullpath.endswith(\".json\")\n and \"memory_usage\" not in fullpath\n and \"summary\" not in fullpath\n )\n\n\ndef analyze_foler(data_directory):\n performance_dir = data_directory + \"/fv3core_performance/\"\n for subdir, _, files in os.walk(performance_dir):\n for file in files:\n fullpath = os.path.join(subdir, file)\n if is_valid_file(fullpath):\n analyze_file_at_path(fullpath)\n\n\nif __name__ == \"__main__\":\n args = 
parse_arguments()\n analyze_foler(args.data_dir)\n", "sub_path": "summary_files/json_summary.py", "file_name": "json_summary.py", "file_ext": "py", "file_size_in_byte": 2904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 26, "usage_type": "name"}, {"api_name": "statistics.median", "line_number": 34, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}, {"api_name": "statistics.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 52, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 53, "usage_type": "call"}, {"api_name": "json.load", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 60, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}]} +{"seq_id": "299049595", "text": "# -*- coding: utf-8 -*-\n\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom fuel_upgrade import utils\n\nfrom fuel_upgrade.engines.host_system import HostSystemUpgrader\nfrom fuel_upgrade.pre_upgrade_hooks.base import PreUpgradeHookBase\n\nlogger = logging.getLogger(__name__)\n\n\nclass AddMonitordKeystoneCredentialsHook(PreUpgradeHookBase):\n \"\"\"Monitoring service Keystone credentials: [1].\n\n This patch updates the astute.yaml file adding 'monitord' user credentials.\n This user is required to create Fuel notifications when disk space on\n master node is getting low. 
We don't want to use the standard 'admin' user\n because when user changes password via UI it's not reflected in the\n astute.yaml file.\n\n [1] https://bugs.launchpad.net/fuel/+bug/1371757\n \"\"\"\n\n # : This hook required only for docker and host system engines\n enable_for_engines = [HostSystemUpgrader]\n\n # : New credentials\n keystone_config = {\n 'keystone': {\n \"monitord_user\": \"monitord\",\n \"monitord_password\": utils.generate_uuid_string(),\n }\n }\n\n def check_if_required(self):\n return len(\n set(self.keystone_config['keystone']).difference(\n self.config.astute.get('keystone', {})\n )\n )\n\n def run(self):\n \"\"\"Adds default credentials to config file\n \"\"\"\n self.update_astute_config(defaults=self.keystone_config)\n", "sub_path": "fuel_upgrade_system/fuel_upgrade/fuel_upgrade/pre_upgrade_hooks/from_6_0_to_any_add_monitord_credentials.py", "file_name": "from_6_0_to_any_add_monitord_credentials.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "fuel_upgrade.pre_upgrade_hooks.base.PreUpgradeHookBase", "line_number": 27, "usage_type": "name"}, {"api_name": "fuel_upgrade.engines.host_system.HostSystemUpgrader", "line_number": 40, "usage_type": "name"}, {"api_name": "fuel_upgrade.utils.generate_uuid_string", "line_number": 46, "usage_type": "call"}, {"api_name": "fuel_upgrade.utils", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "11505132", "text": "import numpy as np\nimport torch\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\nfrom nnunet.training.data_augmentation.default_data_augmentation import get_default_augmentation\nfrom nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset\n\nimport os\n\n\nfrom torch.utils.data import TensorDataset, DataLoader\n\nfrom nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2\nfrom nnunet.utilities.to_torch import maybe_to_torch\n\n\ndef ACDC_dataset():\n t = \"Task213_ACDC\"\n p = join(\"/home/lz/nnu/nnUNet_preprocessed\", t, \"nnUNetData_plans_v2.1_2D_stage0\")\n dataset = load_dataset(p)\n with open(join(join(\"/home/lz/nnu/nnUNet_preprocessed\", t), \"nnUNetPlansv2.1_plans_2D.pkl\"), 'rb') as f:\n plans = pickle.load(f)\n\n unpack_dataset(p)\n dl2d = DataLoader2D(dataset, (256, 256), np.array([256, 256]).astype(int)[1:], 150,\n oversample_foreground_percent=0.33)\n batch = dl2d.generate_train_batch()\n print(batch[\"seg\"].__class__)\n\n data = torch.Tensor(batch[\"data\"])\n seg = torch.Tensor(batch[\"seg\"])\n\n dataset = TensorDataset(data, seg)\n\n return dataset\n\nif __name__ == '__main__':\n t = \"Task213_ACDC\"\n p = join(\"/home/lz/nnu/nnUNet_preprocessed\", t, \"nnUNetData_plans_v2.1_2D_stage0\")\n dataset = load_dataset(p)\n with open(join(join(\"/home/lz/nnu/nnUNet_preprocessed\", t), \"nnUNetPlansv2.1_plans_2D.pkl\"), 'rb') as f:\n plans = pickle.load(f)\n\n unpack_dataset(p)\n dl2d = DataLoader2D(dataset, (256, 256), np.array([256, 256]).astype(int)[1:], 16,\n oversample_foreground_percent=0.33)\n tr, val = get_default_augmentation(dl2d, dl2d, np.array([256, 256]).astype(int))\n # for i in range():\n # __ =next(tr)\n # datict = next(tr)\n # data = data_dict['data']\n # target = data_dict['target']\n # print(data_dict[\"keys\"])\n\n trainer = nnUNetTrainerV2(fold=0, 
dataset_directory=\"/home/lz/nnu/nnUNet_preprocessed/Task213_ACDC\", plans_file=\"/home/lz/nnu/nnUNet_preprocessed/Task213_ACDC/nnUNetPlansv2.1_plans_2D.pkl\",\n output_folder=\"/home/lz/Trans-nnUNet\")\n\n trainer.initialize()\n\n tr_dl, val_dl = trainer.ret_dataloader()\n tr_gen, val_gen = get_default_augmentation(tr_dl, val_dl, np.array([256, 256]).astype(int))\n\n for i in range(1):\n __ =next(tr_gen)\n data_dict = next(tr_gen)\n data = data_dict['data']\n target = data_dict['target']\n #target = torch.Tensor(target)\n print(target.__class__)\n print(data_dict[\"keys\"])\n\n #testdata = dl2d.get_item(0)\n #print(testdata[\"case_properties\"])\n\n\n # path = \"/home/lz/nnu/nnUNet_preprocessed/patient001_frame01.npy\"\n # inst = np.load(path, \"r\")\n # print(inst.shape[1])\n #\n # datapaths = [f'/home/lz/nnu/nnUNet_preprocessed/']\n #\n # gt = join(\"/home/lz/nnu/nnUNet_preprocessed\", t, \"gt_segmentations\")\n # niftilist = os.listdir(gt)\n # npylist = sorted([(item[0:-6]+ \"npy\") for item in niftilist])\n # print(npylist)\n # datamat = [np.load(join(p, item)) for item in npylist]\n #print(datamat)\n\n", "sub_path": "nnunet/DataModules/Acdc_LOADER-test.py", "file_name": "Acdc_LOADER-test.py", "file_ext": "py", "file_size_in_byte": 3182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "nnunet.training.dataloading.dataset_loading.load_dataset", "line_number": 20, "usage_type": "call"}, {"api_name": "nnunet.training.dataloading.dataset_loading.unpack_dataset", "line_number": 24, "usage_type": "call"}, {"api_name": "nnunet.training.dataloading.dataset_loading.DataLoader2D", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 33, "usage_type": "call"}, {"api_name": "nnunet.training.dataloading.dataset_loading.load_dataset", "line_number": 40, "usage_type": "call"}, {"api_name": "nnunet.training.dataloading.dataset_loading.unpack_dataset", "line_number": 44, "usage_type": "call"}, {"api_name": "nnunet.training.dataloading.dataset_loading.DataLoader2D", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "nnunet.training.data_augmentation.default_data_augmentation.get_default_augmentation", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "nnunet.training.network_training.nnUNetTrainerV2.nnUNetTrainerV2", "line_number": 55, "usage_type": "call"}, {"api_name": "nnunet.training.data_augmentation.default_data_augmentation.get_default_augmentation", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "39190247", "text": "from ftw.testbrowser import browsing\nfrom opengever.testing import IntegrationTestCase\nfrom plone import api\nfrom zope.component import getMultiAdapter\n\n\nclass TestGeverLayoutPolicy(IntegrationTestCase):\n\n @browsing\n def test_bumblebee_feature(self, browser):\n self.login(self.regular_user, browser)\n feature_class = 'feature-bumblebee'\n\n self.deactivate_feature('bumblebee')\n browser.open()\n self.assertNotIn(feature_class, browser.css('body').first.classes)\n\n self.activate_feature('bumblebee')\n browser.open()\n 
self.assertIn(feature_class, browser.css('body').first.classes)\n\n @browsing\n def test_word_meeting_feature_presence(self, browser):\n self.login(self.regular_user, browser)\n\n feature_class = 'feature-word-meeting'\n\n self.activate_feature('meeting')\n browser.open()\n self.assertIn(feature_class, browser.css('body').first.classes)\n\n self.deactivate_feature('meeting')\n browser.open()\n self.assertNotIn(feature_class, browser.css('body').first.classes)\n\n @browsing\n def test_no_model_class_on_regular_content(self, browser):\n self.login(self.regular_user, browser)\n browser.open(self.dossier)\n self.assertEquals([],\n filter(lambda classname: classname.startswith('model-'),\n browser.css('body').first.classes))\n\n @browsing\n def test_model_class_on_sql_wrapper(self, browser):\n self.login(self.committee_responsible, browser)\n self.activate_feature('meeting')\n browser.open(self.meeting)\n self.assertIn('model-meeting', browser.css('body').first.classes)\n\n def test_render_base_returns_correct_url(self):\n self.login(self.manager)\n portal = api.portal.get()\n contents = api.content.find(context=portal)\n portal_types = set(el.portal_type for el in contents)\n for portal_type in portal_types:\n brains = api.content.find(portal_type=portal_type)\n obj = brains[0].getObject()\n layout = getMultiAdapter((obj, self.request), name=u'plone_layout')\n self.assertEqual(layout.renderBase(), obj.absolute_url().rstrip(\"/\") + \"/\")\n", "sub_path": "opengever/base/tests/test_layout.py", "file_name": "test_layout.py", "file_ext": "py", "file_size_in_byte": 2253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "opengever.testing.IntegrationTestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 9, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 22, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 36, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 44, "usage_type": "name"}, {"api_name": "plone.api.portal.get", "line_number": 53, "usage_type": "call"}, {"api_name": "plone.api.portal", "line_number": 53, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 53, "usage_type": "name"}, {"api_name": "plone.api.content.find", "line_number": 54, "usage_type": "call"}, {"api_name": "plone.api.content", "line_number": 54, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 54, "usage_type": "name"}, {"api_name": "plone.api.content.find", "line_number": 57, "usage_type": "call"}, {"api_name": "plone.api.content", "line_number": 57, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 57, "usage_type": "name"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "258521441", "text": "import os\nimport copy\nfrom src.utils import common\nfrom pandas.core.frame import DataFrame\nimport pyspark.sql.functions as f\nfrom pyspark.sql.functions import when,_collect_list_doc,split,concat_ws\nimport pandas as pd\n\nfrom pyspark.sql.types import StringType,StructField,StructType,ArrayType,DataType\nimport re\nfrom bs4 import BeautifulSoup\nfrom src.service import wordSegment\nfrom src.app import cache\nfrom src.dao import issueconstruction\n\n\ng_word_segment_black_files = os.path.join(os.path.dirname(os.getcwd()), \\\n 'src', \\\n 'cfg', \\\n \"blackList.txt\")\n\ng_align_label = ['iConfig',\n 
'iFeatureV7B70',\n 'iCustomer',\n 'iPDTPL',\n 'iPDTSwitch',\n 'iPDTSwitchSeries',\n 'iPDTSwitchSpecs',\n\t'iPDTSwitch',\n 'iPDTPL',\n 'iPDTRouter',\n 'iPDTRouterSeries',\n 'iPDTRouterSpecs',\n 'iPDTNew_network',\n 'iPDTNew_networkSeries',\n 'iPDTNew_networkSpecs',\n 'iPDTIP_Wlan',\n 'iPDTIP_WlanSeries',\n 'iPDTIP_WlanSpecs',\n 'iPDTH3C_Soft',\n 'iPDTH3C_SoftSeries',\n 'iPDTH3C_SoftSpecs',\n 'iPDTIP_Security',\n 'iPDTIP_SecuritySeries',\n 'iPDTIP_SecuritySpecs',\n 'iPDTServer',\n 'iPDTServerSeries',\n 'iPDTServerSpecs',\n 'iPDTStorage',\n 'iPDTStorageSeries',\n 'iPDTStorageSpecs',\n 'iPDTFusion_Architecture',\n 'iPDTFusion_ArchitectureSeries',\n 'iPDTFusion_ArchitectureSpecs',\n 'iPDTH3Cloud',\n 'iPDTH3CloudSeries',\n 'iPDTH3CloudSpecs',\n 'iPDTBig_Data',\n 'iPDTBig_DataSeries',\n 'iPDTBig_DataSpecs',\n 'iPDTIoT',\n 'iPDTIoTSeries',\n 'iPDTIoTSpecs',\n 'iPDTBig_Security',\n 'iPDTBig_SecuritySeries',\n 'iPDTBig_SecuritySpecs',\n 'iPDTLTE',\n 'iPDTLTESeries',\n 'iPDTLTESpecs',\n 'iPDTTransmission',\n 'iPDTTransmissionSeries',\n 'iPDTTransmissionSpecs',\n 'iPDTAngle',\n 'iPDTAngleSeries',\n 'iPDTAngleSpecs',\n 'iPDTCabling',\n 'iPDTCablingSeries',\n 'iPDTCablingSpecs',\n 'iPDTH3CloudOS',\n 'iPDTH3CloudOSSeries',\n 'iPDTH3CloudOSSpecs',\n 'iPDTIntelligence_Center',\n 'iPDTIntelligence_CenterSeries',\n 'iPDTIntelligence_CenterSpecs',\n 'iPDTIntelligence_Home',\n 'iPDTIntelligence_HomeSeries',\n 'iPDTIntelligence_HomeSpecs',\n 'iPDTMini',\n 'iPDTMiniSeries',\n 'iPDTMiniSpecs',\n 'iPDTStandard_Network',\n 'iPDTStandard_NetworkSeries',\n 'iPDTStandard_NetworkSpecs',\n 'iPDTH3C',\n 'iPDTSpecsProperty_Switchs',\n 'iPDTSpecsProperty_Router',\n 'iPDTSpecsProperty_NewNetwork',\n 'iPDTSpecsProperty_IPWlan',\n 'iPDTSpecsProperty_H3CSoft',\n 'iPDTSpecsProperty_IPSecurity',\n 'iPDTSpecsProperty_Server',\n 'iPDTSpecsProperty_Storage',\n 'iPDTSpecsProperty_FusionArchitecture',\n 'iPDTSpecsProperty_H3Cloud',\n 'iPDTSpecsProperty_BigData',\n 'iPDTSpecsProperty_IoT',\n 'iPDTSpecsProperty_BigSecurity',\n 'iPDTSpecsProperty_LTE',\n 'iPDTSpecsProperty_Transmission',\n 'iVerCMWV7Branch',\n 'iVerCMWV7B23',\n 'iVerCMWV7B35',\n 'iVerCMWV7B45',\n 'iVerCMWV7B70',\n 'iVerH3C',\n 'iWordDict',\n 'iRealation']\n\n\n'''\nkey:alias,value:name\n'''\ng_primary_key_align_dict = {}\n\n\ndef load_eneity_align_dict(labelList):\n # label是给定的一个个label\n for label in labelList:\n # 得到label数据\n data = cache.get(label)\n\n for k in data.keys():\n name = k\n alias = data[k]\n\n if common.data_is_NULL(alias):\n continue\n if isinstance(alias,list):\n for a in alias:\n g_primary_key_align_dict.update({a:name})\n else:\n g_primary_key_align_dict.update({alias:name})\n\n return g_primary_key_align_dict\n\n\ndef word_primary_key_align(words, isExpend=True):\n global g_primary_key_align_dict\n\n if common.data_is_NULL(g_primary_key_align_dict):\n g_primary_key_align_dict=load_eneity_align_dict(g_align_label)\n\n if common.data_is_NULL(g_primary_key_align_dict):\n return words\n\n retWords = []\n for w in words:\n if w in g_primary_key_align_dict.keys():\n w1 = g_primary_key_align_dict[w]\n if w1 != w:\n if isExpend:\n retWords.append(w1)\n else:\n w = w1\n retWords.append(w)\n return retWords\n\n\n# 将将嵌套的list转换成一维的list\n# def flat(l):\n# for k in l :\n# if not isinstance(k,(list,tuple)):\n# yield k\n# else:\n# yield from flat(k)\n#\n\ndef merge(row):\n s_dict = dict()\n s_dict[row['name']] = row['searchkey']\n # keys是一个个neme\n for keys in s_dict.keys():\n # 通过key得到值\n list_w = []\n for words in s_dict.get(str(keys)).split(\",\"):\n words = 
words.replace(\"[\", \"\")\n words = words.replace(\"]\", \"\")\n words = words.lstrip(\" \")\n words = words.rstrip(\" \")\n if words not in list_w:\n list_w.append(words)\n else:\n continue\n w_dict_2 = {\n \"name\": keys,\n \"searchkey\": list_w\n }\n return w_dict_2\n\n\n# ⑤关键字\ndef search(df1,spark):\n # 还没加关键字\n df_search=df1.withColumn(\"searchkey\",f.concat_ws(',',\"describekey\",\"detailkey\"))\n rdd_search2=df_search.rdd.map(lambda row: merge(row))\n schema=StructType([StructField(\"name\",StringType(),True),\n StructField(\"searchkey\",StringType(),True)])\n df=spark.createDataFrame(rdd_search2,schema)\n df.createOrReplaceTempView(\"tb_searchkey\")\n df1.createOrReplaceTempView(\"tb_df1\")\n\n df_end=spark.sql(\"\"\"select a.OWNER,a.PMAnalysis,a.Rname,a.Vname,a.adminAdvice,a.approverComments,\n a.att_file_num1,a.att_file_num3,a.att_img_num1,a.att_img_num3,a.baseline,a.category,a.categoryStr,\n a.causeAnalysis,a.creationdate,a.currentNode,a.currentPerson,a.cut_words,a.defectModifier,a.defectNo,\n a.defect_ODCSeverity,a.developerComments,a.issueProcessor,a.lastProcessed,a.lastupdateTimestamp,\n a.lengthofstay,a.nodeCode,a.nodeName,a.operation_type,a.productLineName,a.productName,a.refresh_timestamp,\n a.solution,a.status,a.submitBy,a.submitDate,a.suspendReason,a.testReport,a.testTool,a.testToolStr,a.testerComments,\n a.name as defectid,a.describe,a.detail,a.describekey,a.detailkey,b.searchkey as summary from\n (select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,att_file_num1,\n att_file_num3,att_img_num1,att_img_num3,baseline,category,categoryStr,causeAnalysis,\n creationdate,currentNode,currentPerson,cut_words,defectModifier,defectNo,defect_ODCSeverity,\n developerComments,issueProcessor,lastProcessed,lastupdateTimestamp,lengthofstay,nodeCode,nodeName,\n operation_type,productLineName,productName,refresh_timestamp,solution,status,submitBy,submitDate,\n suspendReason,testReport,testTool,testToolStr,testerComments,name,describe,detail,describekey,detailkey from tb_df1)\n a inner join\n (select name as names,searchkey from tb_searchkey) b on a.name = b.names\"\"\")\n df_end.write.saveAsTable(\"quality_carbon_new.es_idms_defect_v4_output\", mode=\"append\")\n spark.stop()\n\n\n# 白名单筛选rdd\ndef match_rule(whiteList,row):\n w_dict=dict()\n w_dict[row['name']]=row['detailkey']\n # keys是一个个neme\n for keys in w_dict.keys():\n # 通过key得到值\n list_w=[]\n for words in w_dict.get(str(keys)).split(\",\"):\n words=str(words)\n words=words.replace(\"[\",\"\")\n words=words.replace(\"]\",\"\")\n words=words.lstrip(\" \")\n words=words.rstrip(\" \")\n list_w.append(words)\n w_dict_2 = {\n \"name\":keys,\n \"detailkey\":list(set(list_w).intersection(set(whiteList)))\n }\n return w_dict_2\n\n\ndef white(spark,df_black):\n retList = []\n whiteListFile = issueconstruction.g_word_segment_white_file\n if common.data_is_NULL(whiteListFile):\n return df_black\n whiteList = common.read_file_lines_to_list(whiteListFile)\n if common.data_is_NULL(whiteList):\n return []\n\n # 将白名单whiteList转换成一行一列的dataframe\n # schema=StructType([StructField(\"white\",StringType(),True)])\n # df_white=spark.createDataFrame(DataFrame(whiteList), schema) # [\"white\"]\n # df_all=df_white.withColumn(\"name\",f.lit(\"白名单\"))\n # df_all.registerTempTable(\"tmp_all\")\n # df_whiteList=spark.sql(\"select concat_ws(',',collect_set(white)) as detailkey from tmp_all group by name\")\n\n rdd=df_black.rdd.map(lambda row: match_rule(whiteList, row))\n schema=StructType([StructField(\"name\",StringType(),True),\n 
StructField(\"detailkey\",StringType(),True)])\n df_detailkey=spark.createDataFrame(rdd,schema)\n return df_detailkey\n\n\n# ④白名单筛选,\ndef words_white_list_process_new(spark,df_black):\n df_detailkey=white(spark,df_black)\n\n df_detailkey.createOrReplaceTempView(\"tb_detailkey\")\n df_black.createOrReplaceTempView(\"tb_black\")\n df1=spark.sql(\"\"\"select a.OWNER,a.PMAnalysis,a.Rname,a.Vname,a.adminAdvice,a.approverComments,\n a.att_file_num1,a.att_file_num3,a.att_img_num1,a.att_img_num3,a.baseline,a.category,\n a.categoryStr,a.causeAnalysis,a.creationdate,a.currentNode,a.currentPerson,a.cut_words,\n a.defectModifier,a.defectNo,a.defect_ODCSeverity,a.developerComments,a.issueProcessor,\n a.lastProcessed,a.lastupdateTimestamp,a.lengthofstay,a.nodeCode,a.nodeName,a.operation_type,\n a.productLineName,a.productName,a.refresh_timestamp,a.solution,a.status,a.submitBy,a.submitDate,\n a.suspendReason,a.testReport,a.testTool,a.testToolStr,a.testerComments,\n a.name,a.describe,a.detail,a.describekey,b.detailkey from\n (select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,\n att_file_num1,att_file_num3,att_img_num1,att_img_num3,baseline,\n category,categoryStr,causeAnalysis,creationdate,currentNode,\n currentPerson,cut_words,defectModifier,defectNo,defect_ODCSeverity,\n developerComments,issueProcessor,lastProcessed,lastupdateTimestamp,\n lengthofstay,nodeCode,nodeName,operation_type,productLineName,productName,\n refresh_timestamp,solution,status,submitBy,submitDate,suspendReason,testReport,testTool,testToolStr,testerComments,\n name,describe,detail,describekey from tb_black)\n a inner join\n (select name,detailkey from tb_detailkey) b on a.name = b.name\"\"\")\n\n search(df1,spark)\n\n\n# 黑名单的udf\ndef black(words):\n blackListFile = issueconstruction.g_word_segment_black_files\n if blackListFile is None:\n return not blackListFile\n else:\n retList = []\n for d in words:\n if d in blackListFile:\n continue\n if d in retList:\n continue\n retList.append(d)\n return retList\n\n\n# ③黑名单过滤\ndef words_black_list_process_new(spark,df_primary):\n blackListFile = issueconstruction.g_word_segment_black_files\n\n if common.data_is_NULL(blackListFile):\n return df_primary\n\n blackList = common.read_file_lines_to_list(blackListFile)\n\n if common.data_is_NULL(blackList):\n return df_primary\n\n df_primary.registerTempTable(\"tb_black_content\")\n spark.udf.register(\"black\",black)\n df_black=spark.sql(\"select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,att_file_num1,\"\n \"att_file_num3,att_img_num1,att_img_num3,baseline,category,categoryStr,causeAnalysis,\"\n \"creationdate,currentNode,currentPerson,cut_words,defectModifier,defectNo,defect_ODCSeverity,\"\n \"developerComments,issueProcessor,lastProcessed,lastupdateTimestamp,lengthofstay,nodeCode,\"\n \"nodeName,operation_type,productLineName,productName,refresh_timestamp,solution,status,\"\n \"submitBy,submitDate,suspendReason,testReport,testTool,testToolStr,testerComments,\"\n \"name,describe,detail,black(describekey) as describekey,detailkey from tb_black_content\")\n\n # 白名单筛选\n words_white_list_process_new(spark,df_black)\n\n\n# ②df是经过html清洗后的数据\ndef discovery_from_wordseg_new(spark,df):\n # 通过dataframe分词\n df.registerTempTable(\"tb_jieba_content\")\n spark.udf.register(\"run\",wordSegment.run)\n # 分词\n df_jieba_content=spark.sql(\"select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,\"\n \"att_file_num1,att_file_num3,att_img_num1,att_img_num3,baseline,category,\"\n 
\"categoryStr,causeAnalysis,creationdate,currentNode,currentPerson,cut_words,\"\n \"defectModifier,defectNo,defect_ODCSeverity,developerComments,issueProcessor,\"\n \"lastProcessed,lastupdateTimestamp,lengthofstay,nodeCode,nodeName,operation_type,\"\n \"productLineName,productName,refresh_timestamp,solution,status,submitBy,submitDate,\"\n \"suspendReason,testReport,testTool,testToolStr,testerComments,\"\n \"name,describe,detail,run(describe) as describekey\"\n \",run(detail) as detailkey from tb_jieba_content\")\n # 索引\n df_jieba_content.registerTempTable(\"tb_primary_content\")\n spark.udf.register(\"word_primary_key_align\",word_primary_key_align)\n df_primary=spark.sql(\"select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,att_file_num1,\"\n \"att_file_num3,att_img_num1,att_img_num3,baseline,category,categoryStr,causeAnalysis,\"\n \"creationdate,currentNode,currentPerson,cut_words,defectModifier,defectNo,defect_ODCSeverity,\"\n \"developerComments,issueProcessor,lastProcessed,lastupdateTimestamp,lengthofstay,nodeCode,\"\n \"nodeName,operation_type,productLineName,productName,refresh_timestamp,solution,status,\"\n \"submitBy,submitDate,suspendReason,testReport,testTool,testToolStr,testerComments,\"\n \"name,describe,detail,word_primary_key_align(describekey) as describekey,\"\n \"word_primary_key_align(detailkey) as detailkey from tb_primary_content\")\n # 测试分词索引后生成数据\n words_black_list_process_new(spark,df_primary)\n\n\n# 将方法设置成udf\ndef clean_content(s):\n s=str(s)\n destStr = ''\n src_soup = BeautifulSoup(s, 'html5lib')\n if src_soup is not None:\n # get_text得到html内容\n src_soup_text = src_soup.get_text()\n if src_soup_text:\n destStr = src_soup_text.replace('\\n', '')\n destStr = destStr.replace('\\t', '')\n destStr = re.sub('\\\\s+', ' ', destStr)\n return destStr\n\n\n# ①用rdd新换\ndef discovery_from_delhtmllabel_new(spark,df_content):\n df_content.registerTempTable(\"tb_content\")\n spark.udf.register(\"clean_content\",clean_content)\n df=spark.sql(\"select OWNER,PMAnalysis,Rname,Vname,adminAdvice,approverComments,att_file_num1,\"\n \"att_file_num3,att_img_num1,att_img_num3,baseline,category,categoryStr,causeAnalysis,\"\n \"creationdate,currentNode,currentPerson,cut_words,defectModifier,defectNo,defect_ODCSeverity,\"\n \"developerComments,issueProcessor,lastProcessed,lastupdateTimestamp,lengthofstay,nodeCode,\"\n \"nodeName,operation_type,productLineName,productName,refresh_timestamp,solution,status,submitBy,\"\n \"submitDate,suspendReason,testReport,testTool,testToolStr,testerComments,name,describe,\"\n \"case when detail is not null or detail != null then clean_content(detail) else detail end detail from tb_content\")\n discovery_from_wordseg_new(spark,df)\n", "sub_path": "02-idms_v2/src/service/kdd.py", "file_name": "kdd.py", "file_ext": "py", "file_size_in_byte": 16095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "src.app.cache.get", "line_number": 130, "usage_type": "call"}, {"api_name": "src.app.cache", "line_number": 130, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 136, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 136, "usage_type": "name"}, 
{"api_name": "src.utils.common.data_is_NULL", "line_number": 150, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 150, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 153, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 153, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.concat_ws", "line_number": 204, "usage_type": "call"}, {"api_name": "pyspark.sql.functions", "line_number": 204, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 206, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 206, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 206, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 207, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 207, "usage_type": "call"}, {"api_name": "src.dao.issueconstruction.g_word_segment_white_file", "line_number": 255, "usage_type": "attribute"}, {"api_name": "src.dao.issueconstruction", "line_number": 255, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 256, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 256, "usage_type": "name"}, {"api_name": "src.utils.common.read_file_lines_to_list", "line_number": 258, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 258, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 259, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 259, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 270, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 270, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 270, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 271, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 271, "usage_type": "call"}, {"api_name": "src.dao.issueconstruction.g_word_segment_black_files", "line_number": 306, "usage_type": "attribute"}, {"api_name": "src.dao.issueconstruction", "line_number": 306, "usage_type": "name"}, {"api_name": "src.dao.issueconstruction.g_word_segment_black_files", "line_number": 322, "usage_type": "attribute"}, {"api_name": "src.dao.issueconstruction", "line_number": 322, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 324, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 324, "usage_type": "name"}, {"api_name": "src.utils.common.read_file_lines_to_list", "line_number": 327, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 327, "usage_type": "name"}, {"api_name": "src.utils.common.data_is_NULL", "line_number": 329, "usage_type": "call"}, {"api_name": "src.utils.common", "line_number": 329, "usage_type": "name"}, {"api_name": "src.service.wordSegment.run", "line_number": 350, "usage_type": "attribute"}, {"api_name": "src.service.wordSegment", "line_number": 350, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 380, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 387, "usage_type": "call"}]} +{"seq_id": "285841872", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Pokeball(Sprite):\n\n def __init__(self, ai_settings, screen):\n super().__init__()\n\n self.screen = screen\n self.image = 
pygame.image.load('images/pokeball.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.ai_settings = ai_settings\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n self.center = float(self.rect.centerx)", "sub_path": "alien_invasion/pokeball.py", "file_name": "pokeball.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "243586702", "text": "# From http://deeplearning.net/software/theano/tutorial/adding.html#adding-two-scalars\n\nimport theano.tensor as T\nfrom theano import function\n\n# Step 1:\n# Define two symbols (Variables) representing the quantities that you want to add.\n# 'x' and 'y' are instances of TensorVariable of theano Type dscalar in their type field.\n# T.dscalar is the type we assign to \"0-dimensional arrays (scalar) of doubles (d).\nx = T.dscalar('x')\ny = T.dscalar('y')\n\n# Step 2:\n# Combine 'x' and 'y' into their sum 'z'.\n# 'z' is yet another variable which represents the addition of 'x' and 'y'.\nz = x + y\n\n# Step 3:\n# Create a theano function taking 'x' and 'y' as inputs and giving 'z' as output.\n# A theano function takes a list of input symbol variables as the first argument and\n# an output symbol variable or list of output symbol variables as the second argument.\nf = function([x, y], z)\n\n\n# Apply theano function 'f' over various inputs and display output\nprint(\"f(2, 3) = \" + str(f(2, 3)))\nprint(\"f(16.3, 12.1) = \" + str(f(16.3, 12.1)))", "sub_path": "exercises/baby_steps_algebra/adding_two_scalars.py", "file_name": "adding_two_scalars.py", "file_ext": "py", "file_size_in_byte": 1026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "theano.tensor.dscalar", "line_number": 10, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 10, "usage_type": "name"}, {"api_name": "theano.tensor.dscalar", "line_number": 11, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 11, "usage_type": "name"}, {"api_name": "theano.function", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "653783613", "text": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016 - 2019 Detlev Offenbach \n#\n\n\"\"\"\nModule implementing the automatic scroller.\n\"\"\"\n\n#\n# This module is based on the Qupzilla auto scroller.\n# Copyright (C) 2014 David Rosca \n#\n\n\nfrom PyQt5.QtCore import Qt, QObject, QRect, QEvent, QPoint\nfrom PyQt5.QtWidgets import QApplication, QLabel\n\nfrom .FrameScroller import FrameScroller\n\nimport Preferences\nimport UI.PixmapCache\n\n\nclass AutoScroller(QObject):\n \"\"\"\n Class implementing the automatic scroller.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent object\n @type QObject\n \"\"\"\n super(AutoScroller, self).__init__(parent)\n \n self.__view = None\n \n self.__indicator = QLabel()\n self.__indicator.resize(32, 32)\n self.__indicator.setContentsMargins(0, 0, 0, 0)\n self.__indicator.installEventFilter(self)\n \n self.__scroller = FrameScroller(self)\n self.__scroller.setScrollDivider(\n Preferences.getWebBrowser(\"AutoScrollDivider\"))\n \n 
self.__enabled = Preferences.getWebBrowser(\"AutoScrollEnabled\")\n \n def isEnabled(self):\n \"\"\"\n Public method to get the enabled state.\n \n @return enabled state\n @rtype bool\n \"\"\"\n return self.__enabled\n \n def mouseMove(self, evt):\n \"\"\"\n Public method to handle mouse move events.\n \n @param evt reference to the mouse move event\n @type QMouseEvent\n @return flag indicating, that the event was handled\n @rtype bool\n \"\"\"\n if self.__enabled and self.__indicator.isVisible():\n rect = self.__indicatorGlobalRect()\n xlen = 0\n ylen = 0\n egp = evt.globalPos()\n \n if rect.left() > egp.x():\n xlen = egp.x() - rect.left()\n elif rect.right() < egp.x():\n xlen = egp.x() - rect.right()\n \n if rect.top() > egp.y():\n ylen = egp.y() - rect.top()\n elif rect.bottom() < egp.y():\n ylen = egp.y() - rect.bottom()\n \n self.__scroller.startScrolling(xlen, ylen)\n \n return False\n \n def mousePress(self, view, evt):\n \"\"\"\n Public method to handle mouse button presses.\n \n @param view reference to the web view the button was pressed on\n @type WebBrowserView\n @param evt reference to the mouse button press event\n @type QMouseEvent\n @return flag indicating, that the event was handled\n @rtype bool\n \"\"\"\n if self.__enabled:\n middleButton = evt.buttons() == Qt.MiddleButton\n \n if view:\n # test for start\n if self.__view != view and middleButton:\n return self.__showIndicator(view, evt.pos())\n elif not self.__indicator.isVisible() and middleButton:\n return self.__showIndicator(view, evt.pos())\n \n # test for stop\n if self.__indicator.isVisible():\n self.__stopScrolling()\n return True\n \n return False\n \n def mouseRelease(self, evt):\n \"\"\"\n Public method to handle mouse button releases.\n \n @param evt reference to the mouse button release event\n @type QMouseEvent\n @return flag indicating, that the event was handled\n @rtype bool\n \"\"\"\n if self.__enabled and self.__indicator.isVisible():\n if not self.__indicatorGlobalRect().contains(\n evt.globalPos()):\n self.__stopScrolling()\n return True\n \n return False\n \n def wheel(self):\n \"\"\"\n Public method to handle a mouse wheel event.\n \n @return flag indicating, that the event was handled\n @rtype bool\n \"\"\"\n if self.__enabled and self.__indicator.isVisible():\n self.__stopScrolling()\n return True\n \n return False\n \n def preferencesChanged(self):\n \"\"\"\n Public method to handle a change of the settings.\n \"\"\"\n enabled = Preferences.getWebBrowser(\"AutoScrollEnabled\")\n if enabled != self.__enabled:\n if self.__indicator.isVisible():\n self.__stopScrolling()\n self.__enabled = enabled\n \n self.__scroller.setScrollDivider(\n Preferences.getWebBrowser(\"AutoScrollDivider\"))\n \n def eventFilter(self, obj, evt):\n \"\"\"\n Public method to handle event for an object.\n \n @param obj refernce to the object sending the event\n @type QObject\n @param evt reference to the event to be handled\n @type QEvent\n @return flag indicating, that the event was handled\n @rtype bool\n \"\"\"\n if obj == self.__indicator:\n if evt.type() == QEvent.Enter:\n self.__scroller.stopScrolling()\n elif evt.type() in [QEvent.Wheel, QEvent.Hide,\n QEvent.MouseButtonPress]:\n self.__stopScrolling()\n \n return False\n \n def __showIndicator(self, view, pos):\n \"\"\"\n Private method to show the auto scroll indicator.\n \n @param view reference to the view to show the indicator on\n @type WebBrowserView\n @param pos position to show the indicator at\n @type QPoint\n @return flag indicating, that the indicator is 
shown\n @rtype bool\n \"\"\"\n hit = view.page().hitTestContent(pos)\n \n if hit.isContentEditable() or not hit.linkUrl().isEmpty():\n return False\n \n jsSource = \"\"\"\n var out = {\n vertical:\n window.innerWidth > document.documentElement.clientWidth,\n horizontal:\n window.innerHeight > document.documentElement.clientHeight\n };\n out;\"\"\"\n \n res = view.page().execJavaScript(jsSource)\n if res is None:\n return False\n \n vertical = res[\"vertical\"]\n horizontal = res[\"horizontal\"]\n if not vertical and not horizontal:\n return False\n \n if vertical and horizontal:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollAll.png\"))\n elif vertical:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollVertical.png\"))\n else:\n self.__indicator.setPixmap(\n UI.PixmapCache.getPixmap(\"scrollHorizontal.png\"))\n \n self.__view = view\n p = QPoint(\n pos.x() - self.__indicator.pixmap().width() // 2,\n pos.y() - self.__indicator.pixmap().height() // 2\n )\n \n self.__indicator.setParent(self.__view)\n self.__indicator.move(p)\n self.__indicator.show()\n \n self.__scroller.setPage(view.page())\n \n self.__view.inputWidget().grabMouse()\n QApplication.setOverrideCursor(Qt.ArrowCursor)\n \n return True\n \n def __stopScrolling(self):\n \"\"\"\n Private method to stop scrolling.\n \"\"\"\n self.__view.inputWidget().releaseMouse()\n QApplication.restoreOverrideCursor()\n \n self.__indicator.hide()\n self.__indicator.setParent(None)\n self.__scroller.stopScrolling()\n \n def __indicatorGlobalRect(self):\n \"\"\"\n Private method to calculate the global indicator parameters.\n \n @return global indicator parameters\n @rtype QRect\n \"\"\"\n pos = self.__indicator.parentWidget().mapToGlobal(\n self.__indicator.geometry().topLeft())\n return QRect(pos.x(), pos.y(),\n self.__indicator.width(), self.__indicator.height())\n", "sub_path": "PYTHON/Python_GUI/eric6-19.11/eric/eric6/WebBrowser/AutoScroll/AutoScroller.py", "file_name": "AutoScroller.py", "file_ext": "py", "file_size_in_byte": 8116, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PyQt5.QtCore.QObject", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 40, "usage_type": "call"}, {"api_name": "FrameScroller.FrameScroller", "line_number": 45, "usage_type": "call"}, {"api_name": "Preferences.getWebBrowser", "line_number": 47, "usage_type": "call"}, {"api_name": "Preferences.getWebBrowser", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.MiddleButton", "line_number": 101, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 101, "usage_type": "name"}, {"api_name": "Preferences.getWebBrowser", "line_number": 151, "usage_type": "call"}, {"api_name": "Preferences.getWebBrowser", "line_number": 158, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QEvent.Enter", "line_number": 172, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QEvent", "line_number": 172, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QEvent.Wheel", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QEvent", "line_number": 174, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QEvent.Hide", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QEvent.MouseButtonPress", "line_number": 175, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QEvent", "line_number": 175, "usage_type": "name"}, {"api_name": 
"UI.PixmapCache.PixmapCache.getPixmap", "line_number": 216, "usage_type": "call"}, {"api_name": "UI.PixmapCache.PixmapCache", "line_number": 216, "usage_type": "attribute"}, {"api_name": "UI.PixmapCache", "line_number": 216, "usage_type": "name"}, {"api_name": "UI.PixmapCache.PixmapCache.getPixmap", "line_number": 219, "usage_type": "call"}, {"api_name": "UI.PixmapCache.PixmapCache", "line_number": 219, "usage_type": "attribute"}, {"api_name": "UI.PixmapCache", "line_number": 219, "usage_type": "name"}, {"api_name": "UI.PixmapCache.PixmapCache.getPixmap", "line_number": 222, "usage_type": "call"}, {"api_name": "UI.PixmapCache.PixmapCache", "line_number": 222, "usage_type": "attribute"}, {"api_name": "UI.PixmapCache", "line_number": 222, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 225, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.setOverrideCursor", "line_number": 237, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 237, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ArrowCursor", "line_number": 237, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 237, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 246, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 246, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 261, "usage_type": "call"}]} +{"seq_id": "501758381", "text": "import cv2 # computer vision library\nimport helpers # helper functions\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg # for loading in images\nget_ipython().run_line_magic('matplotlib', 'inline')\nIMAGE_DIR_TRAINING = \"traffic_light_images/training/\"\nIMAGE_DIR_TEST = \"traffic_light_images/test/\"\nIMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TRAINING)\nn = 0\nselected_label = IMAGE_LIST[n][1]\nwhile selected_label != \"yellow\":\n n += 1\n selected_label = IMAGE_LIST[n][1]\n\nselected_image = IMAGE_LIST[n][0]\nplt.imshow(selected_image)\nprint(selected_label)\nprint(n)\ndef standardize_input(image):\n standard_im = np.copy(image) \n return cv2.resize(standard_im, (32,32))\n\ndef one_hot_encode(label):\n colors = {\"red\" : 0,\n \"yellow\" : 1,\n \"green\" : 2}\n one_hot_encoded = [0]*len(colors)\n one_hot_encoded[colors[label]] = 1\n \n return one_hot_encoded\nimport test_functions\ntests = test_functions.Tests()\ntests.test_one_hot(one_hot_encode)\ndef standardize(image_list):\n standard_list = []\n for item in image_list:\n image = item[0]\n label = item[1]\n standardized_im = standardize_input(image)\n one_hot_label = one_hot_encode(label) \n standard_list.append((standardized_im, one_hot_label))\n \n return standard_list\nSTANDARDIZED_LIST = standardize(IMAGE_LIST)\nn = 800\nselected_image = STANDARDIZED_LIST[n][0]\nplt.imshow(selected_image)\nselected_label = STANDARDIZED_LIST[n][1]\nprint(selected_label)\nimage_num = 0\ntest_im = STANDARDIZED_LIST[image_num][0]\ntest_label = STANDARDIZED_LIST[image_num][1]\nhsv = cv2.cvtColor(test_im, cv2.COLOR_RGB2HSV)\n\nprint('Label [red, yellow, green]: ' + str(test_label))\nh = hsv[:,:,0]\ns = hsv[:,:,1]\nv = hsv[:,:,2]\nf, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,10))\nax1.set_title('Standardized image')\nax1.imshow(test_im)\nax2.set_title('H channel')\nax2.imshow(h, cmap='gray')\nax3.set_title('S channel')\nax3.imshow(s, cmap='gray')\nax4.set_title('V channel')\nax4.imshow(v, 
cmap='gray')\ndef create_feature(rgb_image):\n hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n h = hsv[:,:,0]\n s = hsv[:,:,1]\n v = hsv[:,:,2]\n kernel = np.array([[ -4, -4, -4], \n [ -4, 32, -4], \n [ -4, -4, -4]])\n s_edges = cv2.filter2D(s, -1, kernel)\n blur = np.array([[ 1/9, 1/9, 1/9], \n [ 1/9, 1/9, 1/9], \n [ 1/9, 1/9, 1/9]])\n s_blur = cv2.filter2D(s, -1, kernel)\n for i in range(20):\n s_blur = cv2.filter2D(s_blur, -1, blur)\n \n #Create mask based on blurred edges in s\n s_blur_avg = int(np.sum(s_blur)/(len(s_blur)*len(s_blur[0])))\n s_blur_std = int(np.std(s_blur))\n s_mask = np.greater(s_blur, s_blur_avg+s_blur_std)\n \n #apply the mask to v\n v_mask = v\n v_mask[s_mask == 0] = [0]\n v_top = np.sum(v_mask[0:15])\n v_middle = np.sum(v_mask[7:23])\n v_bottom = np.sum(v_mask[15:31])\n v_sum = v_top + v_middle + v_bottom\n feature = [v_top/v_sum, v_middle/v_sum, v_bottom/v_sum]\n \n return feature\n\nimage_num = 723\ntest_im = STANDARDIZED_LIST[image_num][0]\ncreate_feature(test_im)\ndef create_feature2(rgb_image):\n\n ##Convert image to HSV color space\n hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n h = hsv[:,:,0]\n s = hsv[:,:,1]\n v = hsv[:,:,2]\n \n # Detect edges in S \n # 3x3 edge detection filters\n kernel = np.array([[ -4, -4, -4], \n [ -4, 32, -4], \n [ -4, -4, -4]])\n s_edges = cv2.filter2D(s, -1, kernel)\n \n # Blur edges. Need to blur enough so that areas with signification changes in saturation bleed into each other\n blur = np.array([[ 1/9, 1/9, 1/9], \n [ 1/9, 1/9, 1/9], \n [ 1/9, 1/9, 1/9]])\n s_blur = cv2.filter2D(s, -1, kernel)\n for i in range(20):\n s_blur = cv2.filter2D(s_blur, -1, blur)\n \n #Create mask based on blurred edges in s\n s_blur_avg = int(np.sum(s_blur)/(len(s_blur)*len(s_blur[0])))\n s_blur_std = int(np.std(s_blur))\n s_mask = np.greater(s_blur, s_blur_avg+s_blur_std)\n \n #apply the mask to h\n h_mask = h\n h_mask[s_mask == 0] = [0]\n \n feature = np.sum(h_mask/360)/np.sum(s_mask)\n \n return feature\n\nimage_num = 2\ntest_im = STANDARDIZED_LIST[image_num][0]\ncreate_feature2(test_im)\ndef estimate_label(rgb_image):\n \n feature = np.array(create_feature(rgb_image))\n predicted_label = [0, 0, 0]\n if create_feature2(rgb_image) > 0.38:\n predicted_label[0] = 1\n else:\n predicted_label[feature.argmax(axis=0)] = 1\n \n return predicted_label \n\nimage_num = 723\ntest_im = STANDARDIZED_LIST[image_num][0]\nestimate_label(test_im)\nTEST_IMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TEST)\n\n# Standardize the test data\nSTANDARDIZED_TEST_LIST = standardize(TEST_IMAGE_LIST)\n\n# Shuffle the standardized test data\nrandom.shuffle(STANDARDIZED_TEST_LIST)\n\n\n# In[60]:\n\n\n# Constructs a list of misclassified images given a list of test images and their labels\n# This will throw an AssertionError if labels are not standardized (one-hot encoded)\n\ndef get_misclassified_images(test_images):\n # Track misclassified images by placing them into a list\n misclassified_images_labels = []\n\n # Iterate through all the test images\n # Classify each image and compare to the true label\n for image in test_images:\n\n # Get true data\n im = image[0]\n true_label = image[1]\n assert(len(true_label) == 3), \"The true_label is not the expected length (3).\"\n\n # Get predicted label from your classifier\n predicted_label = estimate_label(im)\n assert(len(predicted_label) == 3), \"The predicted_label is not the expected length (3).\"\n\n # Compare true and predicted labels \n if(predicted_label != true_label):\n # If these labels are not equal, the image has 
been misclassified\n misclassified_images_labels.append((im, predicted_label, true_label))\n \n # Return the list of misclassified [image, predicted_label, true_label] values\n return misclassified_images_labels\n\n\n# Find all misclassified images in a given test set\nMISCLASSIFIED = get_misclassified_images(STANDARDIZED_TEST_LIST)\n\n# Accuracy calculations\ntotal = len(STANDARDIZED_TEST_LIST)\nnum_correct = total - len(MISCLASSIFIED)\naccuracy = num_correct/total\n\nprint('Accuracy: ' + str(accuracy))\nprint(\"Number of misclassified images = \" + str(len(MISCLASSIFIED)) +' out of '+ str(total))\nn = 6\nselected_image = MISCLASSIFIED[n][0]\nprint(create_feature2(selected_image))\nplt.imshow(selected_image)\nimport test_functions\ntests = test_functions.Tests()\n\nif(len(MISCLASSIFIED) > 0):\n # Test code for one_hot_encode function\n tests.test_red_as_green(MISCLASSIFIED)\nelse:\n print(\"MISCLASSIFIED may not have been populated with images.\")\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "Traffi Light Classifier.py", "file_name": "Traffi Light Classifier.py", "file_ext": "py", "file_size_in_byte": 6944, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "helpers.load_dataset", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 23, "usage_type": "call"}, {"api_name": "test_functions.Tests", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 55, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.greater", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.filter2D", 
"line_number": 126, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.greater", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "helpers.load_dataset", "line_number": 158, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "test_functions.Tests", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "330909571", "text": "from multiprocessing import Process\nimport time\n\n\ndef process_A():\n print('process 1 running')\n time.sleep(5)\n print('process 1 running again')\n\n\ndef process_B():\n print('process 2 running')\n time.sleep(2)\n print('process 2 running again')\n\n'''\nstart后p1,p2会不断切换运行,join之后依然会不断切换运行,\n只是要等join的调用者在切换调用的过程中运行完毕之后才会向下执行\n既:在join前start的函数都会不断切换执行,若有函数调用join,\n则需在切换不断调用的过程中等待该函数执行完成后才开始执行join以下的代码\n'''\nif __name__ == '__main__':\n p1 = Process(target=process_A)\n p2 = Process(target=process_B)\n p1.start()\n p2.start()\n p1.join()\n # p2.join()\n # time.sleep(10)\n print('Back to main process')", "sub_path": "practice/other_stuff/multiprocess.py", "file_name": "multiprocess.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 7, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 23, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "535873301", "text": "#\n# You are on your own!\n#\nimport sys\nfrom operator import add\nfrom pyspark import SparkContext\n\nif __name__ == \"__main__\":\n \n ##\n ## Parse the arguments\n ##\n sc = SparkContext( appName=\"Geolocate\" )\n infile1 = 's3://gu-anly502/maxmind/GeoLite2-Country-Blocks-IPv4.csv'\n infile2 = 's3://gu-anly502/maxmind/GeoLite2-Country-Locations-en.csv'\n \n lines1 = sc.textFile(infile1)\n lines2 = sc.textFile(infile2)\n \n inlines1 = lines1.zipWithIndex().filter(lambda x : x[1]>0).map(lambda x : x[0])\n inlines2 = lines2.zipWithIndex().filter(lambda x : x[1]>0).map(lambda x : x[0])\n \n join1 = lines1.map(lambda x: (x[1], x.split(',')[0]).split('/')[0])\n join2 = lines2.map(lambda x: (x[0], x[-1]))\n joined = join1.join(join2)\n ip_country = joined.map(lambda x : x[1])\n ip_country.collect()\n \n\n", "sub_path": "PS04/forensicswiki_geolocate.py", "file_name": "forensicswiki_geolocate.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pyspark.SparkContext", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "90370770", "text": "__author__ = 'sgkim'\n'''\nUPDATE:\n2020-08-24: some participants (P11, P13, ...) have 2 additional EOG channels with inaccurate setting (KIND=2 \"EEG\"), which caused an error while interpolating bad channels. 
The data info was modified so that it could run.\n'''\n\nimport sys, os\nfrom mne.preprocessing import read_ica\nfrom mne.io import read_raw_fif\nfrom mne import pick_types\n\n## Read system arguments:\nif len(sys.argv) < 5:\n    print(\"\"\"\n    USAGE:\n    $ python applyica.py <subject_id> <raw_dir> <ica_dir> <out_dir>\n    \n    EXAMPLE:\n    $ python applyica.py P01 /media/sgk/Ext4_6TB/externaldata/openmiir/eeg/raw/ /media/sgk/Ext4_6TB/externaldata/openmiir/eeg/ica/ /media/sgk/Ext4_6TB/externaldata/openmiir/eeg/proc/\n    \"\"\")\n    sys.exit()\n\n## CHECK FILES/PATHS:\nsep = os.path.sep\nsub = sys.argv[1]\nfn_raw = f\"{sys.argv[2]}{sep}{sub}-raw.fif\"\nfn_ica = f\"{sys.argv[3]}{sep}{sub}-100p_64c-ica.fif\"\nfn_out = f\"{sys.argv[4]}{sep}{sub}-bpf-ica-raw.fif\"\nprint(f'INPUT RAW = {fn_raw}')\nassert os.path.isfile(fn_raw), f\"File [{fn_raw}] not found!\"\nprint(f'INPUT ICA = {fn_ica}')\nassert os.path.isfile(fn_ica), f\"File [{fn_ica}] not found!\"\nprint(f'OUTPUT PROC = {fn_out}')\nassert os.path.isdir(sys.argv[4]), f\"Directory [{sys.argv[4]}] not found!\"\n\n## CHECK DATA:\nraw = read_raw_fif(fn_raw, preload=True)\n#print(f\"BAD CHANNELS = {raw.info['bads']}\")\nprint(raw.info)\n\n# P11+: additional 2 EOGs are incorrectly marked as EEG:\nch_kinds = [raw.info['chs'][i]['kind'] for i in range(len(raw.info['chs']))]\nif ch_kinds.count(2) > 64: # too many eeg(kind=2) channels!\n    print(f\"__WARNING__ Too many 'eeg' channels found: {ch_kinds.count(2)} (should be 64). Manually correcting them.\")\n    raw.info['chs'][68]['kind'] = 202 # 2=EEG, 202=EOG\n    raw.info['chs'][69]['kind'] = 202\n\n## Now, let's redo the pipeline:\n# bad channel interpolation\nraw.interpolate_bads(origin=[0, 0, 0])\n\n# Bandpass filtering 0.5-30 Hz (FFT?)\neeg_pick = pick_types(raw.info, meg=False, eeg=True,\n                      eog=False, stim=False, exclude=[])\nraw.filter(0.5, 30, picks=eeg_pick, filter_length='40s',\n           l_trans_bandwidth=0.1, h_trans_bandwidth=0.5, method='fft',\n           n_jobs=4, verbose=True)\n\n## READ & APPLY ICA matrix\nica = read_ica(fn_ica)\nprint(f\"BAD COMPONENTS = {ica.exclude}\")\nica.apply(raw, exclude=ica.exclude)\n\nraw.save(fname=fn_out)\n", "sub_path": "eeg/preprocessing/applyica/applyica.py", "file_name": "applyica.py", "file_ext": "py", "file_size_in_byte": 2373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "mne.io.read_raw_fif", "line_number": 37, "usage_type": "call"}, {"api_name": "mne.pick_types", "line_number": 53, "usage_type": "call"}, {"api_name": "mne.preprocessing.read_ica", "line_number": 60, 
"usage_type": "call"}]} +{"seq_id": "21706807", "text": "# coding=utf-8\nfrom decimal import Decimal\nfrom datetime import datetime\nfrom pony.orm import *\n\ndb = Database(\"sqlite\", \"database.sqlite\", create_db=True)\n\n\nclass Adv(db.Entity):\n\t_table_ = \"Advs\"\n\tid = PrimaryKey(int, auto=True)\n\tuser = Required(\"User\")\n\tname = Required(str)\n\tyear = Required(int)\n\tprice = Required(int)\n\tcomments = Required(LongStr)\n\tmileage = Optional(Decimal)\n\tcar = Required(\"Car\")\n\tphotos = Set(\"Photo\")\n\n\nclass User(db.Entity):\n\t_table_ = \"Users\"\n\tlogin = PrimaryKey(str)\n\tpassword = Required(str)\n\temail = Required(LongStr)\n\tstatus = Required(str)\n\tcounty = Required(str)\n\tFIO = Optional(LongStr)\n\ttelephone = Required(int)\n\tcity = Required(str)\n\tmessages = Set(\"Message\")\n\tadvs = Set(Adv)\n\ttype = Required(int)\n\tcomments = Set(\"Comment\")\n\n\nclass Photo(db.Entity):\n\t_table_ = \"Photos\"\n\tid = PrimaryKey(int, auto=True)\n\tfilename = Required(str)\n\tadv = Required(Adv)\n\n\nclass Message(db.Entity):\n\t_table_ = \"Messages\"\n\tid = PrimaryKey(int, auto=True)\n\tuserto = Required(str)\n\tdatetime = Required(datetime)\n\tcontent = Required(str, 500, lazy=True)\n\tuser = Required(User)\n\n\nclass Car(db.Entity):\n\tid = PrimaryKey(int, auto=True)\n\t_table_ = \"Cars\"\n\tmodel = Required(\"Model\")\n\tadvs = Set(Adv)\n\tcomments = Set(\"Comment\")\n\tapproved = Required(bool)\n\ttransmission = Optional(str)\n\tcolor = Optional(str)\n\tmotor_power = Optional(float)\n\tbody_type = Optional(str)\n\tmotor_type = Optional(str)\n\trudder = Optional(str)\n\nclass Automark(db.Entity):\n\t_table_ = \"Automarks\"\n\tname = PrimaryKey(str)\n\tdescription = Required(str)\n\tmodels = Set(\"Model\")\n\t\nclass Model(db.Entity):\n\t_table_ = \"Models\"\n\tautomark = Required(\"Automark\")\n\tname = PrimaryKey(str)\n\tcars = Set(Car)\n\n\nclass Comment(db.Entity):\n\t_table_ = \"Comments\"\n\tid = PrimaryKey(int, auto=True)\n\tuser = Required(User)\n\tcar = Required(Car)\n\tcontent = Required(str, 1000)\n\tmark = Required(int)\n\n\nsql_debug(True)\ndb.generate_mapping(create_tables=True)\n\n\n@db_session\ndef populate_database():\n\tif select(s for s in User).count() > 0:\n\t\treturn\n\tu1 = User(login=\"jonnny\", password=\"123456\",\n\t\t\t email=\"joyiee@google.com\", status=\"Confirmed\",\n\t\t\t county=\"USA\", FIO=\"Endrue Jonatan Ferhemshtein\",\n\t\t\t telephone=3345144, city=\"New York\",\n\t\t\t type=0)\n\tu2 = User(login=\"dummy34\", password=\"654321\",\n\t\t\t email=\"mich47@google.com\", status=\"Confirmed\",\n\t\t\t county=\"Germany\", FIO=\"Michael fon Aubershazen\",\n\t\t\t telephone=35745685, city=\"Berlin\",\n\t\t\t type=0)\n\tu3 = User(login=\"betty4\", password=\"123321\",\n\t\t\t email=\"jonny@google.com\", status=\"NotConfirmed\",\n\t\t\t county=\"GB\", FIO=\"ser John Brown\",\n\t\t\t telephone=73542622, city=\"London\",\n\t\t\t type=0)\n\tm1 = Automark(name = \"KIA\", description = u\"Kia Motors Corporation — корейская автомобилестроительная компания, второй автопроизводитель в Южной Корее и седьмой в мире.\")\n\tm2 = Automark(name = \"Ford\", description = u\"Ford — американская автомобилестроительная компания, производитель автомобилей под марками «Ford».\")\n\tm3 = Automark(name = \"Chevrolet\", description = u\"Chevrolet, — марка автомобилей, производимых и реализуемых одноимённым экономически самостоятельным подразделением корпорации General Motors.\")\n\t\n\tmd1 = Model(automark=m1, name=\"Ceed\")\n\tmd2 = Model(automark=m1, 
name=\"Cerato\")\n\tmd3 = Model(automark=m2, name=\"Focus\")\n\tmd4 = Model(automark=m3, name=\"Cruze\")\n\n\tc1 = Car(model=md1, approved=False,\n\t\t\t transmission=\"mechanic\", color=\"red\",\n\t\t\t motor_power=1.2, body_type=\"hatchback\",\n\t\t\t motor_type=\"diesel\", rudder=\"left\")\n\tc2 = Car(model=md2, approved=True,\n\t\t\t transmission=\"automate\", color=\"blue\",\n\t\t\t motor_power=4.4, body_type=\"sedan\",\n\t\t\t motor_type=\"gasoline\", rudder=\"right\")\n\tc3 = Car(model=md3, approved=True,\n\t\t\t transmission=\"mechanic\", color=\"green\",\n\t\t\t motor_power=2.4, body_type=\"hatchback\",\n\t\t\t motor_type=\"diesel\", rudder=\"left\")\n\tc4 = Car(model=md4, approved=False,\n\t\t\ttransmission=\"mechanic\", color=\"black\",\n\t\t\tmotor_power=3.4, body_type=\"wagon\",\n\t\t\tmotor_type=\"gasoline\", rudder=\"left\")\n\tc5 = Car(model=md1, approved=True,\n\t\t\ttransmission=\"mechanic\", color=\"black\",\n\t\t\t motor_power=1.4, body_type=\"hatchback\",\n\t\t\t motor_type=\"diesel\", rudder=\"left\")\n\tCom1 = Comment(id=1, user=u1,\n\t\t\t\t car=c1, content=u\"Машина отличная мне понравилась всем советуюю на пять сплюсом\", \n\t\t\t\t mark=5)\n\tCom2 = Comment(id=2, user=u1,\n\t\t\t\t car=c2, content=u\"Эта модедь мне не очень понравилась эта модель на любителя\",\n\t\t\t\t mark=2)\n\t\t\t\t \n\ta1 = Adv(user=u1, name=u\"Тачка ОГОНЬ\",\n\t\t\tyear=2000, price=150000,\n\t\t\tmileage=65000, car=c1, comments=u\"Быстрая, мощная, без документов\")\n\ta2 = Adv(user=u2, name=u\"Продам ласточку\",\n\t\t\tyear=1995, price=100000,\n\t\t\tmileage=180000, car=c2, comments=u\"Хорошая машина, долго служила\")\n\ta3 = Adv(user=u3, name=u\"Продаю супер машину\",\n\t\t\tyear=2006, price=2000000,\n\t\t\tmileage=32000, car=c3, comments=u\"Хорошая машинка, битая немного\")\n\ta4 = Adv(user=u1, name=u\"Шевролет\",\n\t\t\tyear=2012, price=250000,\n\t\t\tmileage=48000, car=c4, comments=u\"Хорошая, быстро стартует, немного пороги битые\")\n\n\tP1 = Photo(filename =\"43dbd3af.jpg\",adv=a1)\n\tP2 = Photo(filename =\"29ecf6e9.jpg\",adv=a2)\n\tP3 = Photo(filename =\"3f0e3d67.jpg\",adv=a3)\n\tP4 = Photo(filename =\"aa5ae820.jpg\",adv=a4)\n\n # d2 = Department(name=\"Department of Mathematical Sciences\")\n\t# d3 = Department(name=\"Department of Applied Physics\")\n\t#\n\t# c1 = Course(name=\"Web Design\", semester=1, dept=d1,\n\n# lect_hours=30, lab_hours=30, credits=3)\n# c2 = Course(name=\"Data Structures and Algorithms\", semester=3, dept=d1,\n# lect_hours=40, lab_hours=20, credits=4)\n#\n# c3 = Course(name=\"Linear Algebra\", semester=1, dept=d2,\n# lect_hours=30, lab_hours=30, credits=4)\n# c4 = Course(name=\"Statistical Methods\", semester=2, dept=d2,\n# lect_hours=50, lab_hours=25, credits=5)\n#\n# c5 = Course(name=\"Thermodynamics\", semester=2, dept=d3,\n# lect_hours=25, lab_hours=40, credits=4)\n# c6 = Course(name=\"Quantum Mechanics\", semester=3, dept=d3,\n# lect_hours=40, lab_hours=30, credits=5)\n#\n# g101 = Group(number=101, major='B.E. in Computer Engineering', dept=d1)\n# g102 = Group(number=102, major='B.S./M.S. in Computer Science', dept=d1)\n# g103 = Group(number=103, major='B.S. in Applied Mathematics and Statistics', dept=d2)\n# g104 = Group(number=104, major='B.S./M.S. in Pure Mathematics', dept=d2)\n# g105 = Group(number=105, major='B.E in Electronics', dept=d3)\n# g106 = Group(number=106, major='B.S./M.S. 
in Nuclear Engineering', dept=d3)\n#\n# s1 = Student(name='John Smith', dob=date(1991, 3, 20), tel='123-456', gpa=3, group=g101,\n# courses=[c1, c2, c4, c6])\n# s2 = Student(name='Matthew Reed', dob=date(1990, 11, 26), gpa=3.5, group=g101,\n# courses=[c1, c3, c4, c5])\n# s3 = Student(name='Chuan Qin', dob=date(1989, 2, 5), gpa=4, group=g101,\n# courses=[c3, c5, c6])\n# s4 = Student(name='Rebecca Lawson', dob=date(1990, 4, 18), tel='234-567', gpa=3.3, group=g102,\n# courses=[c1, c4, c5, c6])\n# s5 = Student(name='Maria Ionescu', dob=date(1991, 4, 23), gpa=3.9, group=g102,\n# courses=[c1, c2, c4, c6])\n# s6 = Student(name='Oliver Blakey', dob=date(1990, 9, 8), gpa=3.1, group=g102,\n# courses=[c1, c2, c5])\n# s7 = Student(name='Jing Xia', dob=date(1988, 12, 30), gpa=3.2, group=g102,\n# courses=[c1, c3, c5, c6])\n\n\npopulate_database()\n", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 7914, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "decimal.Decimal", "line_number": 17, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "11008289", "text": "# coding: utf-8\n\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\n\n# step 1: read data\ndef loadData():\n    data = sp.genfromtxt(\"../data/web_traffic.tsv\", delimiter = '\\t')\n    print(data[:10])\n    print(data.shape)\n    return data\n\n\n# 2 preprocess and clean data\ndef plotData(x, y):\n    plt.scatter(x, y)\n    plt.title(\"web traffic over the last month\")\n    plt.xlabel(\"Time\")\n    plt.ylabel(\"Hits/hour\")\n    plt.xticks([w*7*24 for w in range(10)], ['week %i' %w for w in range(10)])\n    plt.autoscale(tight = True)\n    plt.grid()\n    plt.show()\n\n# preprocess\ndef preprocess(data):\n    x = data[:, 0]\n    y = data[:, 1]\n    # Detect invalid entries: sp.isnan(y) returns array([True, False, False, ...], dtype=bool)\n    print(sp.sum(sp.isnan(y)))\n    # Boolean-mask indexing of x and y\n    x = x[~sp.isnan(y)]\n    y = y[~sp.isnan(y)]\n#    plotData(x, y)\n    return x, y\n\n\ndef error(f, x, y):\n    # f(x) - y yields an ndarray\n    # ndarray ** 2 is applied to every element.\n    return sp.sum((f(x) - y)**2)\n\ndef plotModel(func, x):\n    fx = sp.linspace(0, x[-1], 1000)\n    plt.plot(fx, func(fx), linewidth = 3)\n    plt.legend([\"d = %i\" % func.order], loc=\"upper left\")\n\n\n# step 4 select model\ndef selectModel(x, y):\n    fp1, res, rank, sv, rcond = sp.polyfit(x, y, 1, full=True)\n    print(\"Model parameters are %s\" % fp1)\n    print(res)\n    func1 = sp.poly1d(fp1)\n    print(error(func1, x, y))\n    plotModel(func1, x)\n\n    fp2 = sp.polyfit(x, y, 2)\n    print(fp2)\n    func2 = sp.poly1d(fp2)\n    print(error(func2, x, y))\n\n# step 5: evaluate the model\n# looking at the data from another angle may be a good direction\ndef anotherDataSight(x, y):\n    inflection = int(3.5*7*24)\n    testStart = 700\n\n    xa = x[:inflection]\n    ya = y[:inflection]\n\n    xb = x[inflection:testStart]\n    yb = y[inflection:testStart]\n\n    testx = x[testStart:]\n    testy = y[testStart:]\n\n    fa = sp.poly1d(sp.polyfit(xa, ya, 1))\n    fb = sp.poly1d(sp.polyfit(xb, yb, 1))\n    fa_error = error(fa, testx, testy)\n    fb_error = error(fb, testx, testy)\n    print(\"Error Inflection is %f\" %(fa_error + fb_error))\n\n    fb1 = sp.poly1d(sp.polyfit(xb, yb, 1))\n    fb2 = sp.poly1d(sp.polyfit(xb, yb, 2))\n    fb10 = sp.poly1d(sp.polyfit(xb, yb, 10))\n    fb100 = sp.poly1d(sp.polyfit(xb, yb, 100))\n    print(\"Error only using the most important data : \", error(fb1, testx, testy))\n    print(\"Error only using the most important data : \", error(fb2, testx, testy))\n    
print(\"Error only use the most importent data : \", error(fb10, testx, testy))\n print(\"Error only use the most importent data : \", error(fb100, testx, testy))\n\n print(fb2)\n reach_max = fsolve(fb2-100000, 1000)/(7*24)\n print(\"100000 Hits/hour expected at week %f\" % reach_max)\n\n\n\n\n\n\n\ndef easyToUse():\n data = loadData()\n x, y = preprocess(data)\n selectModel(x, y)\n anotherDataSight(x, y)\n\n\n\nif __name__ == \"__main__\":\n easyToUse()\n\n", "sub_path": "machine_learn_system_design/1400OS_01_Codes/mycode/webReg.py", "file_name": "webReg.py", "file_ext": "py", "file_size_in_byte": 2871, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scipy.genfromtxt", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.autoscale", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "scipy.sum", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.isnan", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.isnan", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.isnan", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.linspace", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "scipy.polyfit", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 80, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 80, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.poly1d", 
"line_number": 86, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.poly1d", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.polyfit", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.optimize.fsolve", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "281004001", "text": "import ast\nfrom collections import namedtuple\nimport queue\nimport shlex\nimport threading\n\nfrom prompt_toolkit import CommandLineInterface\nfrom prompt_toolkit.history import InMemoryHistory\nfrom prompt_toolkit.styles import style_from_dict\nfrom prompt_toolkit.shortcuts import create_eventloop, create_prompt_application\nfrom prompt_toolkit.token import Token\n\nExit = namedtuple('Exit', '')\nSet = namedtuple('Set', 'key value')\nSkip = namedtuple('Skip', '')\n\n\nclass Prompt:\n def __init__(self, run_name, state_obj):\n self.run_name = run_name\n self.state_obj = state_obj\n self.cli = None\n self.q = queue.Queue()\n self.thread = threading.Thread(target=self.run)\n\n def start(self):\n self.thread.start()\n\n def stop(self):\n if self.cli:\n self.cli.exit()\n self.thread.join()\n\n def get_bottom_toolbar_tokens(self, cli):\n return [(Token.Toolbar, 'Run '),\n (Token.Name, self.run_name),\n (Token.Toolbar, ' in progress.')]\n\n def get_prompt_tokens(self, cli):\n return [(Token.Prompt, '> ')]\n\n def run(self):\n style = style_from_dict({\n Token.Prompt: 'bold',\n Token.Toolbar: '#ccc bg:#333',\n Token.Name: '#fff bold bg:#333',\n })\n\n history = InMemoryHistory()\n eventloop = create_eventloop()\n app = create_prompt_application(history=history, style=style,\n get_bottom_toolbar_tokens=self.get_bottom_toolbar_tokens,\n get_prompt_tokens=self.get_prompt_tokens)\n self.cli = CommandLineInterface(app, eventloop)\n\n with self.cli.patch_stdout_context(raw=True):\n while True:\n try:\n self.cli.run()\n doc = self.cli.return_value()\n if doc is None:\n return\n cmd = shlex.split(doc.text)\n app.buffer.reset(append_to_history=True)\n\n if not cmd:\n continue\n elif cmd[0] in ('exit', 'quit'):\n self.q.put(Exit())\n return\n elif cmd[0] == 'help':\n print('Help text forthcoming.')\n elif cmd[0] == 'skip':\n self.q.put(Skip())\n elif cmd[0] == 'set':\n self.q.put(Set(cmd[1], ast.literal_eval(' '.join(cmd[2:]))))\n else:\n print('Unknown command. 
Try \\'help\\'.')\n except KeyboardInterrupt:\n continue\n except EOFError:\n self.q.put(Exit())\n return\n except Exception as err:\n print(err)\n self.q.put(Exit())\n return\n\n\nclass PromptResponder:\n def __init__(self, q, args):\n self.q = q\n self.args = args\n\n def __call__(self):\n try:\n while True:\n event = self.q.get(block=False)\n if isinstance(event, Exit):\n raise KeyboardInterrupt()\n elif isinstance(event, Skip):\n return event\n elif isinstance(event, Set):\n setattr(self.args, event.key, event.value)\n except queue.Empty:\n pass\n", "sub_path": "prompt.py", "file_name": "prompt.py", "file_ext": "py", "file_size_in_byte": 3471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.namedtuple", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 15, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 24, "usage_type": "call"}, {"api_name": "prompt_toolkit.token.Token.Toolbar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 35, "usage_type": "name"}, {"api_name": "prompt_toolkit.token.Token.Name", "line_number": 36, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 36, "usage_type": "name"}, {"api_name": "prompt_toolkit.token.Token.Toolbar", "line_number": 37, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 37, "usage_type": "name"}, {"api_name": "prompt_toolkit.token.Token.Prompt", "line_number": 40, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 40, "usage_type": "name"}, {"api_name": "prompt_toolkit.styles.style_from_dict", "line_number": 43, "usage_type": "call"}, {"api_name": "prompt_toolkit.token.Token.Prompt", "line_number": 44, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 44, "usage_type": "name"}, {"api_name": "prompt_toolkit.token.Token.Toolbar", "line_number": 45, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 45, "usage_type": "name"}, {"api_name": "prompt_toolkit.token.Token.Name", "line_number": 46, "usage_type": "attribute"}, {"api_name": "prompt_toolkit.token.Token", "line_number": 46, "usage_type": "name"}, {"api_name": "prompt_toolkit.history.InMemoryHistory", "line_number": 49, "usage_type": "call"}, {"api_name": "prompt_toolkit.shortcuts.create_eventloop", "line_number": 50, "usage_type": "call"}, {"api_name": "prompt_toolkit.shortcuts.create_prompt_application", "line_number": 51, "usage_type": "call"}, {"api_name": "prompt_toolkit.CommandLineInterface", "line_number": 54, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 63, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 76, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 105, "usage_type": "attribute"}]} +{"seq_id": "247833411", "text": "from utils import utf16intarraytostr\nfrom struct import unpack\nimport uuid as _uuid\nimport binascii as _binascii\n\nfrom . 
import FileEntry\n\n\nclass GameDataFormatHeaderEntry(FileEntry):\n    size_of_entry = 8232\n\n    defaults = {\n        'MagicNumber': 0, # $484D4752, RM_MAGICNUMBER\n        'HeaderVersion': 0, # version\n        'HeaderSize': 0, # size of the header\n        'ThumbnailOffset': 0, # location of the thumbnail (jpg)\n        'ThumbnailSize': 0, # size in bytes of the thumbnail\n        'GameGUID': None, # game id (GUID)\n        'GameName': None, # Unicode game name\n        'SaveName': None, # Unicode save name\n        'LevelName': None, # Unicode level name\n        'Comments': None # Unicode comments\n    }\n\n    def read_stream(self, stream):\n        try:\n            unpacked_data = unpack('<4BiiQiLHHBB6s1024H1024H1024H1024H', stream.read(self.size_of_entry))\n        except Exception:\n            return None\n\n        self.MagicNumber = bytearray(unpacked_data[0:4][::-1]).decode('ascii')\n        self.HeaderVersion = unpacked_data[4]\n        if self.MagicNumber != 'HMGR' or self.HeaderVersion != 1:\n            return None\n\n        self.HeaderSize = unpacked_data[5]\n        self.ThumbnailOffset = unpacked_data[6]\n        self.ThumbnailSize = unpacked_data[7]\n\n        # Convert the Delphi 4-component GUID to the 6 components\n        # of a Python GUID.\n        guidp1, guidp2, guidp3, guidp4, guidp5 = unpacked_data[8:13]\n        guidp6 = int(_binascii.b2a_hex(unpacked_data[13]), 16)\n        self.GameGUID = str(_uuid.UUID(fields=(guidp1, guidp2, guidp3, guidp4, guidp5, guidp6)))\n\n        self.GameName = utf16intarraytostr(unpacked_data[14:1038])\n        self.SaveName = utf16intarraytostr(unpacked_data[1038:2062])\n        self.LevelName = utf16intarraytostr(unpacked_data[2062:3086])\n        self.Comments = utf16intarraytostr(unpacked_data[3086:4110])\n        return self\n", "sub_path": "entry/header.py", "file_name": "header.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "struct.unpack", "line_number": 27, "usage_type": "call"}, {"api_name": "binascii.b2a_hex", "line_number": 43, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.utf16intarraytostr", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.utf16intarraytostr", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.utf16intarraytostr", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.utf16intarraytostr", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "71374446", "text": "import os\nfrom collections import OrderedDict\nfrom ..logging import log\n\nROOTDIR = '/opt/stage'\nPHASERANK = OrderedDict([('special',4),('extract',3),('unpack',2),('incoming',1),('proto',0)])\n\nclass Stage(object):\n\n    def __init__(self,rootdir=None):\n        if rootdir is None:\n            rootdir = ROOTDIR\n        self.rootdir = rootdir\n\n\n    def dirpath(self,phase):\n        j = PHASERANK.get(phase)\n        phasedir = \"%d-%s\" % (j,phase) if j is not None else phase\n        return \"%s/%s\" % (self.rootdir,phasedir)\n\n    def __dirpath(self,phase,prefix):\n        j = PHASERANK.get(phase)\n        phasedir = \"%d-%s\" % (j,phase) if j is not None else phase\n        return \"%s/%s/%s\" % (self.rootdir,phasedir,prefix)\n\n    def filepath(self,phase,name):\n        _dirpath = self.dirpath(phase)\n        return \"%s/%s.csv\" % (_dirpath,name)\n\n    def __filepath(self,phase,prefix,name):\n        _dirpath = self.__dirpath(phase,prefix)\n        return \"%s/%s.csv\" % (_dirpath,name)\n\n    def mkdir_phase(self,phase,autoviv=False):\n        _dirpath = self.dirpath(phase)\n        if not os.path.exists(_dirpath):\n            if autoviv:\n                os.mkdir(_dirpath)\n            else:\n                raise ValueError(\"invalid state -- can't find dirpath '%s'\" % _dirpath)\n        return _dirpath\n\n    def 
__mkdir_phase(self,phase,prefix,autoviv=False):\n        _dirpath = self.__dirpath(phase,prefix)\n        if not os.path.exists(_dirpath):\n            if autoviv:\n                os.mkdir(_dirpath)\n            else:\n                raise ValueError(\"invalid state -- can't find dirpath '%s'\" % _dirpath)\n        return _dirpath\n\n    def mkpath(self,phase,name,autoviv=False):\n        _dirpath = self.mkdir_phase(phase,autoviv)\n        return \"%s/%s.csv\" % (_dirpath,name)\n\n    def __mkpath(self,phase,prefix,name,autoviv=False):\n        _dirpath = self.__mkdir_phase(phase,prefix,autoviv)\n        return \"%s/%s.csv\" % (_dirpath,name)\n\n    def latest(self,name):\n        for phase in PHASERANK.keys():\n            _filepath = self.filepath(phase,name)\n            log.debug(\"%s:%s -> %s\" % (name,phase,_filepath))\n            if os.path.exists(_filepath):\n                return _filepath\n        return None\n\n    def __latest(self,prefix,name):\n        for phase in PHASERANK.keys():\n            _filepath = self.__filepath(phase,prefix,name)\n            log.debug(\"%s.%s:%s -> %s\" % (prefix,name,phase,_filepath))\n            if os.path.exists(_filepath):\n                return _filepath\n        return None\n\n\n\"\"\"\ndef export(prefix,name,stage=STAGE,autoviv=False):\n    return mkpath(stage,'export',prefix,name,autoviv)\n\ndef incoming(prefix,name,stage=STAGE,autoviv=False):\n    return mkpath(stage,'incoming',prefix,name,autoviv)\n\"\"\"\n\n\n", "sub_path": "kivo/stage/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 2729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.OrderedDict", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.log.debug", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "logging.log.debug", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "31111139", "text": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport pandas as pd\nimport pyomo.contrib.parmest.parmest as parmest\nfrom pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model \n\n### Parameter estimation\n\n# Vars to estimate\ntheta_names = ['k1', 'k2', 'k3']\n\n# Data, includes multiple sensors for ca and cc\ndata = pd.read_excel('reactor_data_multisensor.xlsx') \n\n# Sum of squared error function\ndef SSE_multisensor(model, data): \n    expr = ((float(data['ca1']) - model.ca)**2)*(1/3) + \\\n           ((float(data['ca2']) - model.ca)**2)*(1/3) + \\\n           ((float(data['ca3']) - model.ca)**2)*(1/3) + \\\n           (float(data['cb']) - model.cb)**2 + \\\n           ((float(data['cc1']) - model.cc)**2)*(1/2) + \\\n           ((float(data['cc2']) - model.cc)**2)*(1/2) + \\\n           (float(data['cd']) - model.cd)**2\n    return expr\n\npest = parmest.Estimator(reactor_design_model, data, theta_names, SSE_multisensor)\nobj, theta = pest.theta_est()\nprint(obj)\nprint(theta)\n", "sub_path": "pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py", "file_name": "multisensor_data_example.py", "file_ext": "py", "file_size_in_byte": 1486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_excel", "line_number": 21, "usage_type": "call"}, {"api_name": "pyomo.contrib.parmest.parmest.Estimator", "line_number": 34, "usage_type": "call"}, {"api_name": "pyomo.contrib.parmest.examples.reactor_design.reactor_design.reactor_design_model", "line_number": 34, "usage_type": "argument"}, {"api_name": "pyomo.contrib.parmest.parmest", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "32990728", "text": "\"\"\"\nCode by Oscar Van Slijpe\n22/11/2018\nReference for the \"matplotlib\" library: https://matplotlib.org/index.html\n\"\"\"\n\nimport matplotlib.pyplot as plt # Handles creating the plots\nimport matplotlib.animation as animation # Handles real-time updating of the plots\n\n\ndef animate(i): # This function builds the window containing the plots of the various parameters. 
\n\n    nombreDeDonnées = 2 # Number of data series used (must not exceed the number of series contained in the txt file).\n    data=import_data(nombreDeDonnées) # Retrieve, via the \"import_data\" function, a matrix whose rows each hold the data of one parameter (temp, bpm, ...).\n    nbr = [i+1 for i in range(len(data[0]))] # Build the list that serves as the x-axis of every plot.\n\n    # Create the temperature plot\n    temp=data[0] # Store the temperature data in the list \"temp\".\n    ax1.clear() # Clear the previously drawn plot no. 1.\n    ax1.plot(nbr, temp) # (Re)draw the plot with the sample count on the x-axis and the temperature on the y-axis.\n    ax1.axis([1,int(len(temp))+1,min(temp)-2,max(temp)+2]) # Set the plot bounds: [x min, x max, y min, y max]\n    ax1.set_title('Temperature'+'\n'+'Mean = '+str(moy(temp))) # Show a title and the mean above the plot (mean obtained from the \"moy\" function).\n    ax1.set_ylabel('°C') # Label the y-axis\n    ax1.set_xlabel('Time') # Label the x-axis\n\n    bpm=data[1] # Same as above but for the BPM parameter.\n    ax2.clear()\n    ax2.plot(nbr, bpm)\n    ax2.axis([1,int(len(bpm))+1,min(bpm)-1,max(bpm)+1])\n    ax2.set_title('BPM'+'\n'+'Mean = '+str(moy(bpm)))\n    ax2.set_ylabel('/second')\n    ax2.set_xlabel('Time')\n\n    # Add the other parameters here\n\ndef import_data(nbr): # This function imports the parameters from the \"Data.txt\" file.\n\n    données=[] # Create the matrix that will hold the data (each row holds the data of one parameter)\n    for i in range(nbr):\n        données.append([])\n    \n    file = open('Data.txt','r') # Open the \"Data.txt\" file.\n    liste=file.readlines() # Read the data of \"Data.txt\" into a list whose lines each contain one value of every parameter (in order).\n    file.close() # Close \"Data.txt\" so as not to interfere with the MQTT program.\n\n    for line in liste: # Read each line.\n        elements=line.split() # Split each line into its individual values.\n        for i in range(nbr): # Read each element\n            données[i].append(float(elements[i])) # Store each value in the matching row of the matrix (temperature: row 1, BPM: row 2, ...).\n\n    return données # Return the matrix built this way.\n    \n\ndef moy(liste): # Function that returns the mean of the values contained in the given list.\n    \n    somme=0\n    for i in liste:\n        somme+=i\n    moyenne=round(somme/len(liste),2) # Round to 2 decimal places.\n    \n    return moyenne\n\n\nfig = plt.figure(\"Real-time data\") # Create the window that will hold all the plots.\n\nax1 = fig.add_subplot(2,3,1) # Create the first plot at position 1 in a grid of 2 rows and 3 columns.\nax2 = fig.add_subplot(2,3,2) # Create the second plot at position 2 in a grid of 2 rows and 3 columns.\n#ax3 = fig.add_subplot(2,3,3)\n#ax4 = fig.add_subplot(2,3,4)\n#ax5 = fig.add_subplot(2,3,5)\n\nani = animation.FuncAnimation(fig,animate,interval=2000) # Configure the refresh of the plot window (figure, function that draws the plots, refresh interval in ms).\n\nplt.show() # Show the plot window\n", "sub_path": "Graph.py", "file_name": "Graph.py", "file_ext": "py", "file_size_in_byte": 5433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": 
"matplotlib.pyplot.figure", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "572113915", "text": "import csv\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\n\nfrom resultinterpretation.model import TestStepResult, TestStepResultItem\n\n\nclass CSVFileExtractor:\n def __init__(self, independentName: str, dependentPrefix: str):\n \"\"\"\n :param independentName: Name of the independent variable column\n :param dependentPrefix: Prefix that all dependent variable columns should have\n \"\"\"\n self.__independentName = independentName\n self.__dependentPrefix = dependentPrefix\n\n def extract(self, paths: List[Path]) -> List[TestStepResultItem]:\n columnDict = {}\n\n for path in paths:\n with open(str(path.absolute())) as csvFile:\n reader = csv.reader(csvFile, delimiter=\",\")\n\n lineNumber = 0\n\n independentColumnNumber = 0\n dependentColumnNumbers = {}\n\n for line in reader:\n if lineNumber == 0:\n independentColumnNumber = line.index(self.__independentName)\n for index, column in enumerate(line, 0):\n if column.startswith(self.__dependentPrefix):\n dependentColumnNumbers[index] = column\n else:\n try:\n independentColumnValue = line[independentColumnNumber]\n\n for columnIndex, dependentVariableName in dependentColumnNumbers.items():\n if dependentVariableName not in columnDict:\n columnDict[dependentVariableName] = {}\n\n valueDictForColumn = columnDict[dependentVariableName]\n\n if independentColumnValue not in valueDictForColumn:\n valueDictForColumn[independentColumnValue] = []\n\n dependentVariableValueList = valueDictForColumn[independentColumnValue]\n\n try:\n dependentVariableValue = line[columnIndex]\n\n dependentVariableValueList.append(dependentVariableValue)\n except IndexError:\n raise Exception(\"Row {0} did not contain index of dependent variable {1}.\"\n .format(str(line), str(dependentVariableValue)))\n except IndexError:\n raise Exception(\"Row {0} did not contain index of independent column {1}.\"\n .format(str(line), str(self.__independentName)))\n\n lineNumber += 1\n\n ret = []\n\n for dataSetName, values in columnDict.items():\n aggregates = {int(k): self.calculateAggregate(v) for k, v in values.items()}\n\n resultObject = TestStepResultItem(\n dataSetName,\n OrderedDict(sorted(aggregates.items()))\n )\n\n ret.append(resultObject)\n\n return ret\n\n def calculateAggregate(self, data: List):\n return sum([float(item) for item in data]) / len(data)\n\n", "sub_path": "utils/evaluation/resultinterpretation/CSVFileExtractor.py", "file_name": "CSVFileExtractor.py", "file_ext": "py", "file_size_in_byte": 3276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 23, "usage_type": "call"}, {"api_name": "resultinterpretation.model.TestStepResultItem", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 71, "usage_type": "call"}, 
{"api_name": "resultinterpretation.model.TestStepResultItem", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "422458577", "text": "import sys, traceback, imp, os, subprocess, logging, datetime, re, signal\nfrom unipath import Path\nimport multiprocessing\n\n\nimport YhLog\nlogger = logging.getLogger(__file__)\ndef process(name='SearchServer'):\n p = multiprocessing.Process(target=restart, kwargs={'py':name})\n p.daemon=True\n p.start()\n p.join()\ndef restart(py='SearchServer'):\n str_cmd = 'ps -ef | grep %s' % py\n logger.error('restart cmd %s' % str_cmd)\n p = subprocess.Popen(str_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n list_pid = []\n for buf in p[0].split('\\n'):\n logger.error('restart %s' % buf)\n try:\n user, pid, _ = re.split(r'\\s+', buf,2)\n pid = int(pid)\n logger.error('kill %s' % pid)\n os.kill(pid, signal.SIGKILL)\n except:\n logger.error('%s\\t%s' % (buf, traceback.format_exc()))\n\ndef start():\n cwd = Path(__file__).absolute().ancestor(1)\n subprocess.check_call('cd %s; python SearchServer.py' % cwd, shell=True)\n \nif __name__=='__main__':\n restart()\n #start()", "sub_path": "searchengine/Restart.py", "file_name": "Restart.py", "file_ext": "py", "file_size_in_byte": 1089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 9, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 21, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 24, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 26, "usage_type": "call"}, {"api_name": "unipath.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "520686666", "text": "import asyncio\r\n\r\nimport asyncpg\r\nimport time\r\nfrom data import config\r\n\r\n\r\nasync def bench_asyncpg_pool():\r\n # pool = await asyncpg.create_pool(user='postgres', host='127.0.0.1')\r\n pool = await asyncpg.create_pool(\r\n user=config.PGUSER,\r\n password=config.PGPASSWORD,\r\n host=config.IP,\r\n database=config.DATABASE\r\n )\r\n power = 2\r\n start = time.monotonic()\r\n for i in range(1, 3):\r\n async with pool.acquire() as con:\r\n rez = await con.fetch('SELECT * FROM Users')\r\n print(rez)\r\n\r\n await pool.close()\r\n end = time.monotonic()\r\n print(end - start)\r\n\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(bench_asyncpg_pool())", "sub_path": "postgres_choice.py", "file_name": "postgres_choice.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "asyncpg.create_pool", "line_number": 10, "usage_type": "call"}, {"api_name": "data.config.PGUSER", "line_number": 11, "usage_type": "attribute"}, {"api_name": "data.config", "line_number": 11, "usage_type": "name"}, {"api_name": "data.config.PGPASSWORD", "line_number": 12, "usage_type": "attribute"}, {"api_name": "data.config", "line_number": 12, "usage_type": "name"}, {"api_name": "data.config.IP", 
"line_number": 13, "usage_type": "attribute"}, {"api_name": "data.config", "line_number": 13, "usage_type": "name"}, {"api_name": "data.config.DATABASE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "data.config", "line_number": 14, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 17, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 24, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "408756044", "text": "###############################################################################\n# PyDial: Multi-domain Statistical Spoken Dialogue System Software\n###############################################################################\n#\n# Copyright 2015 - 2018\n# Cambridge University Engineering Department Dialogue Systems Group\n#\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###############################################################################\n\nfrom copy import deepcopy\nimport SummaryActionRelOnly\nfrom cedm.utils import DActEntity\nimport SummaryAction\nimport policy.GPPolicy\nfrom policy.GPLib import GPSARSA\nfrom utils import Settings, ContextLogger, DiaAct\nimport math, scipy.stats\nfrom ontology import Ontology\nlogger = ContextLogger.getLogger('')\n\nclass FeudalGPSubPolicy(policy.GPPolicy.GPPolicy):\n \n def __init__(self,domainString, learning, sharedParams=None, rolename = None):\n super(FeudalGPSubPolicy, self).__init__(domainString, learning, sharedParams)\n if rolename is not None:\n inpolicyfile = ''\n outpolicyfile = ''\n \n if Settings.config.has_option('policy', 'inpolicyfile'):\n inpolicyfile = Settings.config.get('policy', 'inpolicyfile')\n if Settings.config.has_option('policy', 'outpolicyfile'):\n outpolicyfile = Settings.config.get('policy', 'outpolicyfile')\n if Settings.config.has_option('policy_'+domainString, 'inpolicyfile'):\n inpolicyfile = Settings.config.get('policy_'+domainString, 'inpolicyfile')\n if Settings.config.has_option('policy_'+domainString, 'outpolicyfile'):\n outpolicyfile = Settings.config.get('policy_'+domainString, 'outpolicyfile')\n \n outpolicyfile += \"_{}\".format(rolename)\n inpolicyfile += \"_{}\".format(rolename)\n # Learning algorithm:\n self.learner = GPSARSA(inpolicyfile,outpolicyfile,domainString=domainString, learning=self.learning, sharedParams=sharedParams)\n \n self.rolename = rolename\n \n \n def peekPolicy(self, belief):\n nonExecutableActions = self.actions.getNonExecutable(belief, self.lastSystemAction)\n if self._byeAction is not None:\n nonExecutableActions.append(self._byeAction)\n\n currentstate = self.get_State(belief)\n executable = self._createExecutable(nonExecutableActions)\n \n# print \"-------------- non-executable actions: {}\".format(nonExecutableActions)\n# print \"-------------- executable actions: \"\n# for act in executable:\n# print act.toString()\n# \n# print \"################## GPState\"\n# print currentstate._bstate\n\n if len(executable) < 1:\n logger.error(\"No 
executable actions\")\n\n \n \n if self._byeAction is not None:\n nonExecutableActions.append(self._byeAction)\n \n executable = self._createExecutable(nonExecutableActions) # own domains abstracted actions\n \n state = currentstate\n kernel = self.kernel\n \n Q =[]\n for action in executable:\n if self._scale <= 0:\n [mean, var] = self.QvalueMeanVar(state, action, kernel)\n logger.debug('action: ' +str(action.act) + ' mean then var:\\t\\t\\t ' + str(mean) + ' ' + str(math.sqrt(var)))\n value = mean\n gaussvar = 0\n else:\n [mean, var ] = self.QvalueMeanVar(state, action, kernel) \n gaussvar = self._scale * math.sqrt(var) \n value = gaussvar * Settings.random.randn() + mean # Sample a Q value for this action\n logger.debug('action: ' +str(action.act) + ' mean then var:\\t\\t\\t ' + str(mean) + ' ' + str(gaussvar))\n Q.append((action, value, mean, gaussvar))\n\n Q=sorted(Q,key=lambda Qvalue : Qvalue[1], reverse=True)\n \n best_action, best_actions_sampled_Q_value = Q[0][0], Q[0][1]\n actions_likelihood = 0\n if Q[0][3] != 0:\n actions_likelihood = scipy.stats.norm(Q[0][2], Q[0][3]).pdf(best_actions_sampled_Q_value)\n return [best_action, best_actions_sampled_Q_value, actions_likelihood]\n \n def act_on_with_Q(self, beliefstate):\n '''\n Main policy method: mapping of belief state to system action.\n \n This method is automatically invoked by the agent at each turn after tracking the belief state.\n \n May initially return 'hello()' as hardcoded action. Keeps track of last system action and last belief state. \n \n :param state: the belief state to act on\n :type state: :class:`~utils.DialogueState.DialogueState`\n :param hyps: n-best-list of semantic interpretations\n :type hyps: list\n :returns: the next system action of type :class:`~utils.DiaAct.DiaAct`\n '''\n if self.lastSystemAction is None and self.startwithhello:\n _systemAct , Q = 'hello()', float(\"-inf\")\n else:\n _systemAct, Q = self.nextAction_with_Q(beliefstate)\n self.lastSystemAction = _systemAct\n self.prevbelief = beliefstate\n \n systemAct = DiaAct.DiaAct(_systemAct)\n return systemAct, Q\n \n def nextAction_with_Q(self, belief):\n '''\n Selects next action to take based on the current belief and a list of non executable actions\n NOT Called by BCM\n \n :param belief:\n :type belief:\n :param hyps:\n :type hyps:\n :returns:\n '''\n nonExecutableActions = self.actions.getNonExecutable(belief, self.lastSystemAction)\n \n currentstate = self.get_State(belief)\n executable = self._createExecutable(nonExecutableActions)\n \n# print \"-------------- non-executable actions: {}\".format(nonExecutableActions)\n# print \"-------------- executable actions: \"\n# for act in executable:\n# print act.toString()\n# \n# print \"################## GPState\"\n# print currentstate._bstate\n\n if len(executable) < 1:\n return None, float(\"-inf\")\n\n \n \"\"\"\n ordered_actions_with_Qsamples = self.learner.policy(state=currentstate, kernel=self.kernel, executable=executable)\n best_action = ordered_actions_with_Qsamples[0][0].act # [0][1] is sampled Q value\n self.episode.latest_Q_sample_from_choosen_action = ordered_actions_with_Qsamples[0][1]\n \"\"\"\n best_action, actions_sampledQ, actions_likelihood = self.learner.policy(\n state=currentstate, kernel=self.kernel, executable=executable)\n \n logger.debug('policy activated')\n \n summaryAct = self._actionString(best_action.act)\n \n if self.learning: \n best_action.actions_sampled_Qvalue = actions_sampledQ\n best_action.likelihood_choosen_action = actions_likelihood \n \n 
self.actToBeRecorded = best_action\n # Finally convert action to MASTER ACTION\n masterAct = self.actions.Convert(belief, summaryAct, self.lastSystemAction)\n return masterAct, actions_sampledQ\n \n def hasExecutableAction(self, belief):\n nonExecutableActions = self.actions.getNonExecutable(belief, self.lastSystemAction)\n return len(nonExecutableActions) != len(self.actions.action_names) \n \n \nclass FeudalGPSubPolicyRel(FeudalGPSubPolicy):\n '''\n An implementation of the dialogue policy based on Gaussian process and the GPSarsa algorithm to optimise actions where states are GPState and actions are GPAction.\n \n The class implements the public interfaces from :class:`~Policy.Policy` and :class:`~PolicyCommittee.CommitteeMember`.\n '''\n def __init__(self, domainString, learning, sharedParams=None, rolename = None):\n super(FeudalGPSubPolicyRel, self).__init__(domainString,learning,sharedParams, rolename)\n \n self.actions = SummaryActionRelOnly.SummaryActionRelOnly(domainString, False, self.useconfreq,rolename)\n # Total number of system actions.\n self.numActions = len(self.actions.action_names)\n \n def act_on(self, beliefstate):\n '''\n Main policy method: mapping of belief state to system action.\n \n This method is automatically invoked by the agent at each turn after tracking the belief state.\n \n May initially return 'hello()' as hardcoded action. Keeps track of last system action and last belief state. \n \n :param state: the belief state to act on\n :type state: :class:`~utils.DialogueState.DialogueState`\n :param hyps: n-best-list of semantic interpretations\n :type hyps: list\n :returns: the next system action of type :class:`~utils.DiaAct.DiaAct`\n '''\n if self.lastSystemAction is None and self.startwithhello:\n _systemAct = 'hello()'\n else:\n _systemAct = self.nextAction(beliefstate)\n self.lastSystemAction = _systemAct\n self.prevbelief = beliefstate\n \n systemAct = DActEntity.DiaActEntity(_systemAct)\n return systemAct\n \n def act_on_with_Q(self, beliefstate):\n '''\n Main policy method: mapping of belief state to system action.\n \n This method is automatically invoked by the agent at each turn after tracking the belief state.\n \n May initially return 'hello()' as hardcoded action. Keeps track of last system action and last belief state. 
\n        \n        :param state: the belief state to act on\n        :type state: :class:`~utils.DialogueState.DialogueState`\n        :param hyps: n-best-list of semantic interpretations\n        :type hyps: list\n        :returns: the next system action of type :class:`~utils.DiaAct.DiaAct`\n        '''\n        if self.lastSystemAction is None and self.startwithhello:\n            _systemAct , Q = 'hello()', float(\"-inf\")\n        else:\n            _systemAct, Q = self.nextAction_with_Q(beliefstate)\n        self.lastSystemAction = _systemAct\n        self.prevbelief = beliefstate\n        \n        systemAct = DActEntity.DiaActEntity(_systemAct)\n        return systemAct, Q\n    \n    def get_State(self, beliefstate, keep_none=False): \n        '''\n        Called by BCM\n        \n        :param beliefstate:\n        :type beliefstate:\n        :param keep_none:\n        :type keep_none:\n        '''\n        return GPStateRel(beliefstate, keep_none=keep_none, replace=self.replace, domainString=self.domainString)\n    \n    def constructInitialBelief(self, features):\n        belief = {'beliefs': dict(), 'features': deepcopy(features)}\n        \n        rel = self.rolename.split('#')\n        common_slots = Ontology.global_ontology.get_common_slots(rel[0],rel[1])\n        # {u'area#area': 0.0, u'pricerange#pricerange': 0.0}\n        belief['beliefs']['requested'] = dict()\n        for slot in common_slots:\n            s = slot\n            belief['beliefs'][s] = {'**NONE**': 1.0, 'dontcare' : 0.0, '=': 0.0}\n            belief['beliefs']['requested'][s] = 0.0\n        \n        return belief\n    \nclass FeudalGPSubPolicyObj(FeudalGPSubPolicy):\n    '''\n    An implementation of the dialogue policy based on Gaussian process and the GPSarsa algorithm to optimise actions where states are GPState and actions are GPAction.\n    \n    The class implements the public interfaces from :class:`~Policy.Policy` and :class:`~PolicyCommittee.CommitteeMember`.\n    '''\n    def __init__(self, domainString, learning, sharedParams=None, rolename = None):\n        super(FeudalGPSubPolicyObj, self).__init__(domainString,learning,sharedParams, rolename)\n        \n        self.actions = SummaryAction.SummaryAction(domainString, False, self.useconfreq)\n        \n        # Total number of system actions.\n        self.numActions = len(self.actions.action_names)\n    \n    def get_State(self, beliefstate, keep_none=False): \n        '''\n        Called by BCM\n        \n        :param beliefstate:\n        :type beliefstate:\n        :param keep_none:\n        :type keep_none:\n        '''\n        return GPStateObj(beliefstate, keep_none=keep_none, replace=self.replace, domainString=self.domainString)\n    \n    def act_on(self, beliefstate):\n        '''\n        Main policy method: mapping of belief state to system action.\n        \n        This method is automatically invoked by the agent at each turn after tracking the belief state.\n        \n        May initially return 'hello()' as hardcoded action. Keeps track of last system action and last belief state. 
\n \n :param state: the belief state to act on\n :type state: :class:`~utils.DialogueState.DialogueState`\n :param hyps: n-best-list of semantic interpretations\n :type hyps: list\n :returns: the next system action of type :class:`~utils.DiaAct.DiaAct`\n '''\n if self.lastSystemAction is None and self.startwithhello:\n _systemAct = 'hello()'\n else:\n _systemAct = self.nextAction(beliefstate)\n self.lastSystemAction = _systemAct\n self.prevbelief = beliefstate\n \n systemAct = DActEntity.DiaActEntity(_systemAct)\n return systemAct\n \nclass GPStateObj(policy.GPPolicy.GPState):\n '''\n Currently an exact copy of the original GPState\n '''\n pass\n\nclass GPStateRel(policy.GPPolicy.GPState): \n def extractSimpleBelief(self, b, replace={}):\n '''\n From the belief state b extracts discourseAct, method, requested slots, name, goal for each slot,\n history whether the offer happened, whether last action was inform none, and history features.\n Sets self._bstate\n '''\n with_other = 0\n without_other = 0\n self.isFullBelief = True\n \n for elem in b['beliefs'].keys():\n if elem == 'discourseAct':\n self._bstate[\"goal_discourseAct\"] = b['beliefs'][elem].values()\n without_other +=1\n elif elem == 'method':\n self._bstate[\"goal_method\"] = b['beliefs'][elem].values()\n without_other +=1\n elif elem == 'requested' :\n for slot in b['beliefs'][elem]:\n cur_slot=slot\n if len(replace) > 0:\n cur_slot = replace[cur_slot]\n self._bstate['hist_'+cur_slot] = self.extractSingleValue(b['beliefs']['requested'][slot])\n without_other +=1\n else:\n if elem == 'name':\n self._bstate[elem] = self.extractBeliefWithOther(b['beliefs']['name'])\n with_other +=1\n else:\n cur_slot=elem\n if len(replace) > 0:\n cur_slot = replace[elem]\n\n self._bstate['goal_'+cur_slot] = self.extractBeliefWithOther(b['beliefs'][elem])\n with_other += 1\n\n# additionalSlots = 2\n # if elem not in Ontology.global_ontology.get_system_requestable_slots(self.domainString):\n # additionalSlots = 1\n# if len(self._bstate['goal_'+cur_slot]) !=\\\n# Ontology.global_ontology.get_len_informable_slot(self.domainString, slot=elem)+additionalSlots:\n# print self._bstate['goal_'+cur_slot]\n# logger.error(\"Different number of values for slot \"+cur_slot+\" \"+str(len(self._bstate['goal_'+cur_slot]))+\\\n# \" in ontology \"+ str(Ontology.global_ontology.get_len_informable_slot(self.domainString, slot=elem)+2)) \n \n\n# self._bstate[\"hist_offerHappened\"] = self.extractSingleValue(1.0 if b['features']['offerHappened'] else 0.0)\n# without_other +=1\n# self._bstate[\"hist_lastActionInformNone\"] = self.extractSingleValue(\n# 1.0 if len(b['features']['informedVenueSinceNone'])>0 else 0.0)\n without_other +=1\n for i,inform_elem in enumerate(b['features']['inform_info']):\n self._bstate[\"hist_info_\"+str(i)] = self.extractSingleValue(1.0 if inform_elem else 0.0)\n without_other +=1\n \n # Tom's speedup: convert belief dict to numpy vector \n self.beliefStateVec = self.slowToFastBelief(self._bstate)\n\n return\n", "sub_path": "cedm/policy/FeudalSubPolicy.py", "file_name": "FeudalSubPolicy.py", "file_ext": "py", "file_size_in_byte": 17130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.ContextLogger.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.ContextLogger", "line_number": 32, "usage_type": "name"}, {"api_name": "policy.GPPolicy.GPPolicy", "line_number": 34, "usage_type": "attribute"}, {"api_name": "policy.GPPolicy", "line_number": 34, 
"usage_type": "name"}, {"api_name": "utils.Settings.config.has_option", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.Settings.config.get", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 43, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.Settings.config.has_option", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 44, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.Settings.config.get", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 45, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.Settings.config.has_option", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.Settings.config.get", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 47, "usage_type": "name"}, {"api_name": "utils.Settings.config.has_option", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 48, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 48, "usage_type": "name"}, {"api_name": "utils.Settings.config.get", "line_number": 49, "usage_type": "call"}, {"api_name": "utils.Settings.config", "line_number": 49, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 49, "usage_type": "name"}, {"api_name": "policy.GPLib.GPSARSA", "line_number": 54, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.Settings.random.randn", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.Settings.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "utils.Settings", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.stats.stats.norm", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 107, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 107, "usage_type": "name"}, {"api_name": "utils.DiaAct.DiaAct", "line_number": 131, "usage_type": "call"}, {"api_name": "utils.DiaAct", "line_number": 131, "usage_type": "name"}, {"api_name": "SummaryActionRelOnly.SummaryActionRelOnly", "line_number": 197, "usage_type": "call"}, {"api_name": "cedm.utils.DActEntity.DiaActEntity", "line_number": 222, "usage_type": "call"}, {"api_name": "cedm.utils.DActEntity", "line_number": 222, "usage_type": "name"}, {"api_name": "cedm.utils.DActEntity.DiaActEntity", "line_number": 246, "usage_type": "call"}, {"api_name": "cedm.utils.DActEntity", "line_number": 246, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 261, "usage_type": "call"}, {"api_name": "ontology.Ontology.global_ontology.get_common_slots", "line_number": 264, "usage_type": "call"}, {"api_name": "ontology.Ontology.global_ontology", "line_number": 264, "usage_type": 
"attribute"}, {"api_name": "ontology.Ontology", "line_number": 264, "usage_type": "name"}, {"api_name": "SummaryAction.SummaryAction", "line_number": 283, "usage_type": "call"}, {"api_name": "cedm.utils.DActEntity.DiaActEntity", "line_number": 320, "usage_type": "call"}, {"api_name": "cedm.utils.DActEntity", "line_number": 320, "usage_type": "name"}, {"api_name": "policy.GPPolicy.GPPolicy", "line_number": 323, "usage_type": "attribute"}, {"api_name": "policy.GPPolicy", "line_number": 323, "usage_type": "name"}, {"api_name": "policy.GPPolicy.GPPolicy", "line_number": 329, "usage_type": "attribute"}, {"api_name": "policy.GPPolicy", "line_number": 329, "usage_type": "name"}]} +{"seq_id": "166505097", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport asyncio\nimport logging\nimport os\nimport pathlib\n\nimport websockets\n\nfrom fumbbl_trans import ffb_replay\n\nimport fumlocffbrep\nimport fumlocxmlmatches\n\n\nclass Server:\n\n logger = logging.getLogger(__name__)\n\n @property\n def netloc(self):\n return '{}:{}'.format(self.host, self.port)\n\n def __init__(self, root_dir, host='localhost', port=22223,\n notify_client=True, loop=None):\n root_dir = pathlib.Path(os.path.expanduser(root_dir))\n self.root_dir = root_dir.absolute()\n self.host = host\n self.port = port\n self.loop = loop or asyncio.get_event_loop()\n self.rc = fumlocffbrep.LocalCollection(str(root_dir),\n loop = self.loop)\n self.notify_client = notify_client\n self.server = None\n self.state = 'initialized'\n\n async def handle_connection(self, ws_proto, path):\n if path != ffb_replay.Session.DEFAULT_PATH:\n return\n msg = await ws_proto.recv()\n obj = ffb_replay.Session.msg2obj(msg)\n assert obj['netCommandId'] == 'clientReplay'\n assert obj['replayToCommandNr'] == 0\n replay_id = obj['gameId']\n transport = ws_proto.writer.transport\n peername = transport.get_extra_info('peername')\n fs = 'replay request received: {:0>8}, peer: {}'\n self.logger.info(fs.format(replay_id, peername))\n async with self.rc.aiter(replay_id) as ait:\n i = 0\n async for cdata in ait:\n msg_ = ffb_replay.Session.cdata2msg(cdata)\n await ws_proto.send(msg_)\n arcname = '{:0>8}.{:0>3}'.format(replay_id, i)\n fs = 'message sent: {}, length: {}'\n self.logger.debug(fs.format(arcname, len(msg_)))\n i += 1\n fs = '{:0>8}: {} messages sent from {} source'\n self.logger.info(fs.format(replay_id, i,\n ('remote', 'local')[ait.is_local]))\n # -- Non standard notification to client --\n # This allows local client to spare timeouting the server\n if self.notify_client:\n msg_ = ffb_replay.NONSTANDARD_REPLAY_SENT_MSG\n await ws_proto.send(msg_)\n fs = '{!r} sent: {:0>8}'\n self.logger.debug(fs.format(msg_, replay_id))\n return\n # -- END --\n msg = await ws_proto.recv()\n if msg is not None:\n obj = ffb_replay.Session.msg2obj(msg)\n assert obj['netCommandId'] == 'clientCloseSession'\n fs = 'close session message received: {:0>8}'\n self.logger.debug(fs.format(replay_id))\n fs = 'closing connection: {:0>8}, peer: {}'\n self.logger.info(fs.format(replay_id, peername))\n\n async def serve(self):\n assert self.state == 'initialized'\n self.server = await websockets.serve(self.handle_connection,\n self.host, self.port, loop=self.loop)\n socknames = tuple(s.getsockname()\n for s in self.server.server.sockets)\n fs = 'server is listening: {}'\n for sockname in socknames:\n self.logger.info(fs.format(sockname))\n self.state = 'serving'\n\n def start(self):\n self.loop.run_until_complete(self.serve())\n try:\n self.loop.run_forever()\n 
except KeyboardInterrupt:\n pass\n finally:\n self.close()\n self.loop.run_until_complete(self.wait_closed())\n self.loop.close()\n\n def close(self):\n assert self.state == 'serving'\n self.server.close()\n self.state = 'closing'\n\n async def wait_closed(self):\n assert self.state == 'closing'\n await self.server.wait_closed()\n\n\ndef main():\n parser = argparse.ArgumentParser(parents=(\n fumlocxmlmatches.log_parser,\n ))\n parser.add_argument('path',\n action=fumlocxmlmatches.ExistingDirectory,\n help=('path of the root directory'))\n parser.add_argument('--host', default='localhost',\n help='Host (default: \"localhost\").')\n parser.add_argument('--port', type=int,\n default=22223,\n help='Port (default: 22223).')\n parser.add_argument('-s', '--standard', action='store_true',\n default=False,\n help='Standard mode, i.e. do not send notifications '\n 'when done with sending data.')\n\n\n args = parser.parse_args()\n\n loop = asyncio.get_event_loop()\n\n hdlr = logging.StreamHandler(args.logto)\n hdlr.setFormatter(fumlocxmlmatches.LOG_FORMATTER)\n Server.logger.setLevel(args.loglevel)\n Server.logger.addHandler(hdlr)\n\n server = Server(root_dir=args.path, host=args.host,\n port=args.port, notify_client=(not args.standard),\n loop=loop)\n server.start()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "scripts/fumlocwssrv.py", "file_name": "fumlocwssrv.py", "file_ext": "py", "file_size_in_byte": 4401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 32, "usage_type": "call"}, {"api_name": "fumlocffbrep.LocalCollection", "line_number": 33, "usage_type": "call"}, {"api_name": "fumbbl_trans.ffb_replay.Session", "line_number": 40, "usage_type": "attribute"}, {"api_name": "fumbbl_trans.ffb_replay", "line_number": 40, "usage_type": "name"}, {"api_name": "fumbbl_trans.ffb_replay.Session.msg2obj", "line_number": 43, "usage_type": "call"}, {"api_name": "fumbbl_trans.ffb_replay.Session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "fumbbl_trans.ffb_replay", "line_number": 43, "usage_type": "name"}, {"api_name": "fumbbl_trans.ffb_replay.Session.cdata2msg", "line_number": 54, "usage_type": "call"}, {"api_name": "fumbbl_trans.ffb_replay.Session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "fumbbl_trans.ffb_replay", "line_number": 54, "usage_type": "name"}, {"api_name": "fumbbl_trans.ffb_replay.NONSTANDARD_REPLAY_SENT_MSG", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fumbbl_trans.ffb_replay", "line_number": 66, "usage_type": "name"}, {"api_name": "fumbbl_trans.ffb_replay.Session.msg2obj", "line_number": 74, "usage_type": "call"}, {"api_name": "fumbbl_trans.ffb_replay.Session", "line_number": 74, "usage_type": "attribute"}, {"api_name": "fumbbl_trans.ffb_replay", "line_number": 74, "usage_type": "name"}, {"api_name": "websockets.serve", "line_number": 83, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 114, "usage_type": "call"}, {"api_name": "fumlocxmlmatches.log_parser", "line_number": 115, "usage_type": "attribute"}, {"api_name": "fumlocxmlmatches.ExistingDirectory", "line_number": 118, "usage_type": 
"attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 135, "usage_type": "call"}, {"api_name": "fumlocxmlmatches.LOG_FORMATTER", "line_number": 136, "usage_type": "attribute"}]} +{"seq_id": "651719910", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@project: crawl img tag source\n@author: xiaohong\n@time: 2019-01-01\n@feature: crawl image for web site or url/multi thread to download/\n\"\"\"\n\nimport setuptools\nimport io\n\nwith io.open('README.md', encoding='utf-8') as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"crawl_image\",\n version=\"0.1.0\",\n\n author=\"xiaohong2019\",\n author_email=\"2229009854@qq.com\",\n\n description=\"fast crawl web image source or image url list file\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n url=\"https://github.com/xiaohong2019/crawl_image\",\n packages=setuptools.find_packages(),\n install_requires=[\n 'chardet',\n 'beautifulsoup4',\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 935, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "io.open", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "526862537", "text": "import pandas as pd\nimport numpy as np\nimport scipy as sp\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nimport statsmodels as sm\nimport pylppl as lp\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets, linear_model\nimport portfolio_functions as pf\nimport pylppl as lp\n\nimport matplotlib as mp\n\nfont = {'family' : 'Helvetica',\n 'weight' : 'normal',\n 'size' : 22}\n\nlabel_size = 14\nmp.rcParams['xtick.labelsize'] = label_size\nmp.rcParams['ytick.labelsize'] = label_size\n\n####### ####### ##### ###### ##### ########## # # # #\n#### ####\n####### ####### ##### ###### ##### ########## # # # #\n\ndef simple_OLS(pars, x):\n \n \"\"\"OLS estimator for Beta reads\n beta_hat = (x'x)^-1 x'y.\n \"\"\"\n \n beta = pars[0]\n alpha = 1.2\n \n epsilon = np.random.standard_normal(len(x))\n y = np.random.standard_normal(len(x))\n \n for i in range(len(x)):\n y[i] = alpha + beta * x[i] + epsilon[i]\n \n return y\n\n####### ####### ##### ###### ##### ########## # # # #\ndef check_cost_function(t1, t2, y, x, mod=False):\n \n \"\"\"\n Check the cost function using statsmodel OLS for benchmarking\n -> Same of what Ive got\n \"\"\"\n \n if mod == False:\n # RUN THE SIMPLE LOGIT REGRESSION\n logit_mod = sm.regression.linear_model.OLS(y[t1:t2], x[t1:t2])\n logit_res = logit_mod.fit()\n _ssr = logit_res.ssr/len(data)\n return _ssr\n else:\n # RUN THE SIMPLE LOGIT REGRESSION\n logit_mod = sm.regression.linear_model.OLS(y[t1:t2], x[t1:t2])\n logit_res = logit_mod.fit()\n _ssr = logit_res.ssr/len(data)\n return _ssr\n\n####### ####### ##### ###### ##### ########## # # # #\ndef plot_2_ssr(SSR, SSR2, double=False):\n\n f,ax = plt.subplots(1,1,figsize=(12,6))\n ax.plot(SSR,linewidth=3, marker='s', color='b', markersize=10, markevery=4)\n ax.set_title('$SSR/N$ and $SSR/N - 
\lambda()$ as a function of different sample sizes', fontsize=14)\n    ax.set_xlabel(r'Sample size (N)',fontsize=20)\n    ax.set_ylabel(r'$(SSR/N) - \lambda(t_2 - t_1)$',fontsize=22)\n    ax.tick_params(axis='both', which='major', labelsize=14)\n    ax.tick_params(axis='both', which='minor', labelsize=14)\n    ax.grid(linewidth=.7)\n    ax.legend([r'$DGP \rightarrow y = \beta X + \epsilon$'],loc='best',fontsize=20)\n    if double == True:\n        a = ax.twinx()\n        a.plot(SSR2, linewidth=3, marker='o', color='k', markersize=10, markevery=4)\n        #a.legend([r'$y = \beta X + \epsilon^2$'],loc='best',fontsize=20)\n        a.tick_params(axis='both', which='major', labelsize=14)\n        a.tick_params(axis='both', which='minor', labelsize=14)\n        a.set_ylabel(r'$SSR/N \, (black)$',fontsize=22)\n    else:\n        pass\n    plt.tight_layout()\n\n####### ####### ##### ###### ##### ########## # # # #\ndef beta_OLS(y, x):\n    \n    m = np.shape(y)\n    \n    y = y.reshape(m[0],1)\n    x = x.reshape(m[0],1)\n    \n    # Estimating beta OLS\n    beta_hat = np.dot(x.T,x)**-1. * np.dot(x.T,y)\n    \n    # return\n    return beta_hat\n\n####### ####### ##### ###### ##### ########## # # # #\ndef get_sse_for_given_data(data, x, plot=False, standardize=False):\n    \n    # First we get beta\n    beta = np.dot(x.T,x)**-1. * np.dot(x.T,data)\n    \n    # Pre-allocate y\n    y = []\n    \n    # recreate data\n    for i in range(len(x)):\n        y.append(beta * x[i])\n\n    if plot == True:\n        plt.plot(x, data, marker='o', linestyle='', markersize=8, color='r', alpha=0.9)\n        plt.grid()\n        plt.title(r'$\beta = %.3f$'%beta)\n        plt.plot(x,y,color='k', linewidth=3.5)\n    else:\n        pass\n    \n    squared_residuals = (data - y)**2\n    ssr = np.sum(squared_residuals)\n    \n    if standardize == False:\n        return ssr\n    else:\n        return ssr/len(data)\n\n####### ####### ##### ###### ##### ########## # # # #\ndef didier_cost(data, x, slope, standardize=True):\n    \n    # First we get beta\n    beta = np.dot(x.T,x)**-1. 
* np.dot(x.T,data)\n    \n    # Pre-allocate y\n    y = []\n    \n    # recreate data\n    for i in range(len(x)):\n        y.append(beta * x[i])\n\n    # get standardized sum of squares\n    squared_residuals = (data - y)**2\n    ssr_n = np.sum(squared_residuals)/len(data)\n    ssr = np.sum(squared_residuals)\n    lamb = - (ssr_n / len(data))\n    ssr_new = ssr_n - lamb*(len(data))\n\n    if standardize == True:\n        return ssr_n - slope * (len(data))\n    else:\n        return ssr - (slope * (len(data)))\n\n####### ####### ##### ###### ##### ########## # # # #\ndef get_sse_across_different_sample_sizes_OLS(y, x, slope, didier=False, plot=False, standardize=False):\n    \n    \"\"\" Check the Sum of Squared Residuals \n    at different sample-sizes\n    \"\"\"\n    \n    if didier == True:\n        \n        limit = len(y) - 10\n        \n        test = []\n        \n        for tt1 in range(limit):\n            t1, t2 = tt1, -1\n            test.append(didier_cost(y[t1:t2], x[t1:t2], slope, standardize))\n        \n        return test\n\n    else:\n        \n        limit = len(y) - 10\n        \n        test = []\n        \n        for tt1 in range(limit):\n            t1, t2 = tt1, -1\n            test.append(get_sse_for_given_data(y[t1:t2], x[t1:t2], plot=plot, standardize=standardize))\n        \n        return test\n\n####### ####### ##### ###### ##### ########## # # # #\ndef run_it_all_and_spit_normed_cost_according_to_didier(y, x, standardize=True):\n    \n    # calculate regularly the SSE/N for the linear model\n    slope = 0.\n    SSE = get_sse_across_different_sample_sizes_OLS(y, x, slope, standardize=standardize)\n    \n    # Fit the decreasing trend of the cost function\n    slope = calculate_slope_of_normed_cost(SSE)\n    \n    # pass results into the iterator\n    SSE_DS = get_sse_across_different_sample_sizes_OLS(y, x, slope[0], didier=True, standardize=standardize)\n    \n    return SSE_DS\n\n####### ####### ##### ###### ##### ########## # # # #\ndef calculate_slope_of_normed_cost(sse):\n    \n    #Create linear regression object\n    regr = linear_model.LinearRegression(fit_intercept=False)\n    \n    # create x range for the sse_ds\n    x_sse = np.arange(len(sse))\n    x_sse = x_sse.reshape(len(sse),1)\n    \n    # Train the model using the training sets\n    res = regr.fit(x_sse, sse)\n    \n    return res.coef_\n\n####### ####### ##### ###### ##### ########## # # # #\ndef simulate_and_plot_OLS_1(N, plot=True):\n\n    \"\"\"\n    Define sample size and plot the normalized\n    SSE as a function of the sample size**-1\n    for the two different standardization schemes,\n    namely the original SSE/N and SSE/N - lambda(t2-t1)\n    \"\"\"\n\n    # Simple monotonic increasing data vector\n    x = np.arange(0,N,1)\n    y = simple_OLS([0.5], x)\n\n    slope=[]\n    SSE = get_sse_across_different_sample_sizes_OLS(y,x,slope,plot=False,standardize=True)\n\n    ds_sse = run_it_all_and_spit_normed_cost_according_to_didier(y, x, standardize=True)\n\n    if plot == True:\n        plot_2_ssr(ds_sse, SSE, double=True)\n    else:\n        return ds_sse, SSE\n\n\ndef OLS_solution(N=200):\n\n    x = np.arange(0, N, 1)\n    x[100:200] = x[100:200] * 2.\n    y = simple_OLS([0.5], x)\n    x = x + np.random.normal(0., 1., len(y))\n\n    slope = []\n\n    SSE = get_sse_across_different_sample_sizes_OLS(y, x, slope, plot=False, standardize=True)\n    ds_sse = run_it_all_and_spit_normed_cost_according_to_didier(y, x, standardize=True)\n\n    data = pd.DataFrame(y)\n\n    return data, ds_sse, SSE\n\n####### ####### ##### ###### ##### ########## # # # #\n## MORE COMPLICATED SCENARIO\n####### ####### ##### ###### ##### ########## # # # #\n\ndef OLS_2(pars, x):\n\n    \"\"\"OLS estimator for Beta reads\n    beta_hat = (x'x)^-1 x'y.\n    \"\"\"\n\n    beta = pars[0]\n\n    epsilon = np.random.standard_normal(len(x))\n    y = np.random.standard_normal(len(x))\n\n    for i in range(len(x)):\n        y[i] = beta 
* x[i] + epsilon[i]**3\n\n    return y\n\n####### ####### ##### ###### ##### ########## # # # #\n# LPPLS COST MODIFICATIONS CALCULATIONS\n####### ####### ##### ###### ##### ########## # # # #\ndef get_lambda_from_normed_SSE(DF, use_normed=True):\n\n    normed_chi_2 = DF.SSE.values / DF.dt.values\n\n    if use_normed == True:\n        y = normed_chi_2\n    else:\n        y = DF.SSE.values\n    x = DF.dt.values\n\n    # Fit the SSE\n    mod = sm.regression.linear_model.OLS(y, x)\n    mod_res = mod.fit()\n\n    # Lambda\n    _lambda = mod_res.params[0]\n\n    return _lambda\n\ndef return_normalized_sse_and_normalized_sse_lambda(DF, _lambda, use_normed=True, index_t1=True):\n\n    \"\"\"\n    if use_normed == True -> We use the normalised cost for calculations\n    if index_t1 == True -> we return a dataframe with t1 as index and SSE/N(DS) values\n    \"\"\"\n\n    if use_normed == True:\n        # Get normed cost\n        chi_2_normed = DF.SSE / DF.dt\n    else:\n        chi_2_normed = DF.SSE\n\n    # Get chi2_normed according to D.S.\n    chi_2_normed_lambda = [(chi_2_normed[i] - _lambda*(DF.dt[i])) for i in range(len(chi_2_normed))]\n\n    # Make t1's for the index\n    if index_t1 == True:\n        t1s = [pd.Timestamp(DF.index[0]) - np.int(DF.dt[i]) * pd.Timedelta('1D') for i in range(len(DF.index))]\n        if use_normed == True:\n            # Make-it a Data Frame\n            ds_chi2 = pd.DataFrame(chi_2_normed_lambda, index = t1s, columns=[r'$(SSE/N) - \lambda()$'])\n        else:\n            # Make-it a Data Frame\n            ds_chi2 = pd.DataFrame(chi_2_normed_lambda, index = t1s, columns=[r'$(SSE) - \lambda()$'])\n    else:\n        ds_chi2 = pd.DataFrame(chi_2_normed_lambda[::-1])\n\n    if use_normed == True:\n        # Returning the regular chi2 for comparison\n        reg_chi2 = pd.DataFrame((DF.SSE / DF.dt).values, index = t1s, columns=['SSE/N'])\n    else:\n        reg_chi2 = pd.DataFrame(DF.SSE.values, index = t1s, columns=['SSE'])\n\n    return ds_chi2, reg_chi2\n\n####### ####### ##### ###### ##### ########## # # # #\ndef estimate_and_plot_modified_SSE(res, data):\n\n    # Get _lambda\n    _lambda = get_lambda_from_normed_SSE(res, use_normed=False)\n    ds_sse, reg_sse = return_normalized_sse_and_normalized_sse_lambda(res, _lambda, use_normed=False, index_t1=True)\n\n    # Get non-normalised results\n    _lambda = get_lambda_from_normed_SSE(res, use_normed=True)\n    ds_sse2, reg_sse2 = return_normalized_sse_and_normalized_sse_lambda(res, _lambda, use_normed=True, index_t1=True)\n\n    # plot\n    plot_SSE_new(ds_sse, reg_sse, ds_sse2, reg_sse2, data)\n\n####### ####### ##### ###### ##### ########## # # # #\n\ndef plot_SSE_new(ds_sse, reg_sse, ds_sse2, reg_sse2, data):\n\n    f,ax = plt.subplots(2,2,figsize=(16,11))\n    axs = ax.ravel()\n\n    ####\n    data[ds_sse.index[-1]:ds_sse.index[0]].plot(ax=axs[0], color='k', linewidth=3)\n    axs[0].grid(True)\n    axs[0].axvline(ds_sse[ds_sse==ds_sse.min()].dropna().index[0],\n                   color='k', linewidth=4, linestyle='--')\n    axs[0].set_xlabel('')\n\n    ####\n    data[ds_sse.index[-1]:ds_sse.index[0]].plot(ax=axs[1], color='k', linewidth=3)\n    axs[1].grid(True)\n    axs[1].axvline(ds_sse2[ds_sse2==ds_sse2.min()].dropna().index[0],\n                   color='k', linewidth=4, linestyle='--')\n    axs[1].set_xlabel('')\n\n    ####\n    ds_sse.plot(ax=axs[2], color='r', linewidth=3, marker='s', markersize=10)\n    axs[2].axvline(ds_sse[ds_sse==ds_sse.min()].dropna().index[0],\n                   color='k', linewidth=4, linestyle='--')\n    a = axs[2].twinx()\n    reg_sse.plot(ax=a, color='k', linewidth=3, marker='o', markersize=10)\n    axs[2].set_yticks([])\n    a.set_yticks([])\n    axs[2].legend(loc='upper right', fontsize=14)\n    a.legend(loc='upper center', fontsize=14)\n    axs[2].grid()\n\n    ####\n    ds_sse2.plot(ax=axs[3], color='r', linewidth=3, 
marker='s', markersize=10)\n axs[3].axvline(ds_sse2[ds_sse2==ds_sse2.min()].dropna().index[0],\n color='k', linewidth=4, linestyle='--')\n a = axs[3].twinx()\n reg_sse2.plot(ax=a, color='k', linewidth=3, marker='o', markersize=10)\n a.set_yticks([])\n axs[3].set_yticks([])\n axs[3].legend(loc='upper right', fontsize=14)\n a.legend(loc='upper center', fontsize=14)\n axs[3].grid()\n\n plt.tight_layout()\n\n###################################\ndef estimate_and_plot_modified_SSE_NEW(res, data):\n # Get _lambda\n #_lambda = get_lambda_from_normed_SSE(res, use_normed=False)\n #ds_sse, reg_sse = return_normalized_sse_and_normalized_sse_lambda(res, _lambda, use_normed=False, index_t1=True)\n\n # Get non-normalised results! NB! USE_NORMED = True in order to work properly\n _lambda = get_lambda_from_normed_SSE(res, use_normed=True)\n ds_sse2, reg_sse2 = return_normalized_sse_and_normalized_sse_lambda(res, _lambda, use_normed=True, index_t1=True)\n\n # Plot Function (TO BE ADDED !!!)\n out1 = pf.normalize_data_for_comparacy(ds_sse2, sklearn=True)\n out2 = pf.normalize_data_for_comparacy(reg_sse2, sklearn=True)\n\n return out1, out2\n\n\n###################################\ndef FinalPlot(data, res, ds_sse, reg_sse, t2):\n\n dahead = 250\n me = 3\n ms = 8\n lw = 1.7\n\n t1init = pd.Timestamp(t2) - 1200 * pd.Timedelta('1D')\n t1fin = pd.Timestamp(t2) + dahead * pd.Timedelta('1D')\n fakeIndex = pd.date_range(start=t2, end=t1fin)\n\n # FIT LPPLS ON THE Best DT\n bestT1 = ds_sse[ds_sse == ds_sse.min()]\n x = pd.Timestamp(t2) - pd.Timestamp(bestT1.dropna().index[0])\n x = x.days\n mres = lp.fit_series(data, t2, x)\n\n # Fake Axis\n newAxis = ds_sse.index.union(fakeIndex)\n na = pd.DataFrame(index=newAxis)\n ds_sse = pd.concat([na, ds_sse], axis=1)\n reg_sse = pd.concat([na, reg_sse], axis=1)\n\n\n fig, axes = plt.subplots(nrows=2, ncols=1, sharex='col', sharey=False,\n gridspec_kw={'height_ratios': [3, 2]},\n figsize=(6, 5))\n\n axes[0].set_title(r'$t_1 = $%s; $t_2 =$ %s'%(str(bestT1.dropna().index[0])[0:10], t2))\n data[t1init:t2].plot(ax=axes[0], color='k')\n data[t1init:t1fin].plot(ax=axes[0], linewidth=0.3, color='k')\n axes[0].axvline(t2, color='k', linewidth=3)\n axes[0].set_ylabel(r'$\\ln(P_t)$', fontsize=18)\n axes[0].axvline(bestT1.dropna().index[0],\n color='k', linewidth=3, linestyle='--')\n fit = lp.lppl_project(mres, data, days=dahead)\n fit['fit'].plot(ax=axes[0], linewidth=2.5, color='r', alpha=0.7)\n fit['projection'].plot(ax=axes[0], linewidth=2.5, color='r',\n linestyle=':')\n\n #axes[0].set_ylim([4.5, 5.2])\n #axes[0].set_ylim([6.5, 9])\n #stdd = data[t1init:t1fin].std()\n\n #axes[0].set_ylim([data[t1init:t1fin].min().values[0]-stdd,\n # data[t1init:t1fin].max().values[0]+stdd])\n axes[0].legend('')\n plt.tight_layout()\n\n # Subplots\n fig.set_tight_layout({'rect': [0, 0, 1, 0.95], 'pad': 1.5, 'h_pad': .0})\n axes[1].axvline(ds_sse[ds_sse == ds_sse.min()].dropna().index[0],\n color='k', linewidth=3, linestyle='--')\n axes[1].axvline(t2, color='k', linewidth=3)\n ds_sse.plot(ax=axes[1], color='k', marker='^', markevery=me,\n markersize=ms, markerfacecolor='w',\n linewidth=lw)\n axes[1].set_ylabel(r'$\\chi^2$', fontsize=18)\n a = axes[1].twinx()\n a.set_yticks([])\n reg_sse.plot(ax=a, color='k', marker='o', markevery=me,\n markersize=ms, markerfacecolor='w',\n linewidth=lw)\n axes[1].legend('')\n a.legend('')\n plt.tight_layout()\n plt.savefig('/Users/demos/Desktop/ttt'+str(t2)+'.pdf')\n\n\n###################################\ndef FinalPlotOLS(data, ds_sse, reg_sse):\n\n fig, axes = 
plt.subplots(nrows=2, ncols=1, sharex='col', sharey=False,\n                             gridspec_kw={'height_ratios': [2, 1]},\n                             figsize=(7, 7))\n    axes[0].plot(data)\n\n    axes[1].plot(ds_sse)\n    axes[1].plot(reg_sse)\n    plt.tight_layout()\n\n\n###################################\n# Post discussion with SP.\n###################################\ndef generateSyntheticLppls(params=None, noise=0.01):\n\n    \"\"\"\n    generate synthetic LPPLS with given noise level with N = 2000\n    \"\"\"\n\n    # params [tc, m, w, A, B, C1, C2]\n    t = np.arange(0, 2000, 1)\n    if params is None:\n        params = [404.3, 0.748203, 6.226502,\n                  1.762818, -7.263146e-04, 9.401988e-05,\n                  8.613132e-05]\n    else:\n        params = params\n\n    # Generate LPPLS\n    sdata = lp.lppl(t, params, reduced=True)\n\n    # Noise?\n    noise = np.random.normal(0, noise, len(sdata))\n    sdata = pd.DataFrame(sdata + noise)\n    sdata.index = pd.date_range(start='1910-01-01', periods=len(sdata))\n\n    return sdata\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "functions_lagrange.py", "file_name": "functions_lagrange.py", "file_ext": "py", "file_size_in_byte": 16549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.random.standard_normal", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.random.standard_normal", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "statsmodels.regression.linear_model.OLS", "line_number": 55, "usage_type": "call"}, {"api_name": "statsmodels.regression", "line_number": 55, "usage_type": "attribute"}, {"api_name": "statsmodels.regression.linear_model.OLS", "line_number": 61, "usage_type": "call"}, {"api_name": "statsmodels.regression", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 148, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 
207, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 207, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 248, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.random.standard_normal", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 271, "usage_type": "attribute"}, {"api_name": "numpy.random.standard_normal", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 272, "usage_type": "attribute"}, {"api_name": "statsmodels.regression.linear_model.OLS", "line_number": 293, "usage_type": "call"}, {"api_name": "statsmodels.regression", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 319, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 319, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 322, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 327, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 331, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "portfolio_functions.normalize_data_for_comparacy", "line_number": 409, "usage_type": "call"}, {"api_name": "portfolio_functions.normalize_data_for_comparacy", "line_number": 410, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 423, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 423, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 424, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 424, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 425, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 429, "usage_type": "call"}, {"api_name": "pylppl.fit_series", "line_number": 431, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 436, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "pylppl.lppl_project", "line_number": 451, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 463, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 463, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 481, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.savefig", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 495, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 508, "usage_type": "call"}, {"api_name": "pylppl.lppl", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 520, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 521, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 522, "usage_type": "call"}]} +{"seq_id": "448832529", "text": "# standard library modules, , ,\nimport string\nimport os\nimport logging\n\n# Cheetah, pip install cheetah, string templating, MIT\nimport Cheetah.Template\n\n# fsutils, , misc filesystem utils, internal\nimport fsutils\n# validate, , validate various things, internal\nimport validate\n\nCMakeLists_Template = '''\n# NOTE: This file is generated by yotta: changes will be overwritten!\n\n#if $toplevel\ncmake_minimum_required(VERSION 2.8)\n\n# always use the CMAKE_MODULE_PATH-provided .cmake files, even when including\n# from system directories:\ncmake_policy(SET CMP0017 OLD)\n\n# toolchain file for $target_name\nset(CMAKE_TOOLCHAIN_FILE $toolchain_file)\n\n$set_targets_like\n#end if\n\nproject($component_name)\n\n# include own root directory\n#echo $include_own_dir\n\n# include root directories of all components we depend on (directly and\n# indirectly)\n$include_root_dirs\n\n# recurse into dependencies that aren't built elsewhere\n$add_depend_subdirs\n\n\n# Some components (I'm looking at you, libc), need to export system header\n# files with no prefix, these directories are listed in the component\n# description files:\n$include_sys_dirs\n\n# And others (typically CMSIS implementations) need to export non-system header\n# files. Please don't use this facility. Please. It's much, much better to fix\n# implementations that import these headers to import them using the full path.\n$include_other_dirs\n\n# CMake doesn't have native support for Objective-C specific flags, these are\n# specified by any depended-on objc runtime using secret package properties...\nset(CMAKE_OBJC_FLAGS \"$set_objc_flags\")\n\n# Components may defined additional preprocessor definitions: use this at your\n# peril, this support WILL go away! (it's here to bridge toolchain component ->\n# target package switchover)\nget_property(EXTRA_DEFINITIONS GLOBAL PROPERTY YOTTA_GLOBAL_DEFINITIONS)\n#raw\nadd_definitions($${EXTRA_DEFINITIONS})\n#end raw\n\n\n# !!! 
FIXME: maybe the target can just add these to the toolchain, no need\n# for repetition in every single cmake list\n# Build targets may define additional preprocessor definitions for all\n# components to use (such as chip variant information)\nadd_definitions($yotta_target_definitions)\n\n# Provide the version of the component being built, in case components want to\n# embed this into compiled libraries\nset(YOTTA_COMPONENT_VERSION \"$component_version\")\n\n# recurse into subdirectories for this component, using the two-argument\n# add_subdirectory because the directories referred to here exist in the source\n# tree, not the working directory\n$add_own_subdirs\n\n'''\n\nSubdir_CMakeLists_Template = '''\n\\# NOTE: This file is generated by yotta: changes will be overwritten!\n\ncmake_minimum_required(VERSION 2.8)\n\ninclude_directories(\"$source_directory\")\n\n#if $executable\nadd_executable($object_name\n #echo ' ' + '\\\\n '.join('\"'+x+'\"' for x in $file_names) + '\\\\n'\n)\n#else\nadd_library($object_name\n #echo ' ' + '\\\\n '.join('\"'+x+'\"' for x in $file_names) + '\\\\n'\n)\n#end if\n\n#if 'objc' in $languages\n\\# no proper CMake support for objective-c flags :(\nset_target_properties($object_name PROPERTIES\n COMPILE_FLAGS \"\\${CMAKE_OBJC_FLAGS}\"\n)\n#end if\n\ntarget_link_libraries($object_name\n #echo ' ' + '\\\\n '.join($link_dependencies) + '\\\\n'\n)\n\n'''\n\n#this is a Cheetah template\nTest_CMakeLists_Template = '''\n\\# NOTE: This file is generated by yotta: changes will be overwritten!\n\nenable_testing()\n\ninclude_directories(\"$source_directory\")\n\n#for $file_names, $object_name, $languages in $tests\nadd_executable($object_name\n #echo ' ' + '\\\\n '.join('\"'+x+'\"' for x in $file_names) + '\\\\n'\n)\n#if 'objc' in $languages\n\\# no proper CMake support for objective-c flags :(\nset_target_properties($object_name PROPERTIES\n COMPILE_FLAGS \"\\${CMAKE_OBJC_FLAGS}\"\n)\n#end if\ntarget_link_libraries($object_name\n #echo ' ' + '\\\\n '.join($link_dependencies) + '\\\\n'\n)\nadd_test($object_name $object_name)\n\n#end for\n'''\n\n\nlogger = logging.getLogger('cmakegen')\n\nIgnore_Subdirs = set(('build','yotta_modules', 'yotta_targets', 'CMake'))\n\n\nclass SourceFile(object):\n def __init__(self, fname, lang):\n super(SourceFile, self).__init__()\n self.fname = fname\n self.lang = lang\n def __repr__(self):\n return self.fname\n def lang(self):\n return self.lang\n\nclass CMakeGen(object):\n def __init__(self, directory, target):\n super(CMakeGen, self).__init__()\n self.buildroot = directory\n logger.info(\"generate for target: %s\" % target)\n self.target = target\n\n def generateRecursive(self, component, all_components, builddir=None, processed_components=None):\n ''' generate top-level CMakeLists for this component and its\n dependencies: the CMakeLists are all generated in self.buildroot,\n which MUST be out-of-source\n\n !!! 
NOTE: experimenting with a slightly different way of doing\n things here, this function is a generator that yields any errors\n produced, so the correct use is:\n\n for error in gen.generateRecursive(...):\n print error\n '''\n if builddir is None:\n builddir = self.buildroot\n if processed_components is None:\n processed_components = dict()\n if not self.target:\n yield 'Target \"%s\" is not a valid build target' % self.target\n\n toplevel = not len(processed_components)\n \n logger.debug('generate build files: %s (target=%s)' % (component, self.target))\n # because of the way c-family language includes work we need to put the\n # public header directories of all components that this component\n # depends on (directly OR indirectly) into the search path, which means\n # we need to first enumerate all the direct and indirect dependencies\n recursive_deps = component.getDependenciesRecursive(\n available_components = all_components,\n target = self.target,\n available_only = True\n )\n dependencies = component.getDependencies(\n all_components,\n target = self.target,\n available_only = True\n )\n\n for name, dep in dependencies.items():\n if not dep:\n yield 'Required dependency \"%s\" of \"%s\" is not installed.' % (name, component)\n # ensure this component is assumed to have been installed before we\n # check for its dependencies, in case it has a circular dependency on\n # itself\n processed_components[component.getName()] = component\n new_dependencies = {name:c for name,c in dependencies.items() if c and not name in processed_components}\n self.generate(builddir, component, new_dependencies, dependencies, recursive_deps, toplevel)\n\n logger.debug('recursive deps of %s:' % component)\n for d in recursive_deps.values():\n logger.debug(' %s' % d)\n\n processed_components.update(new_dependencies)\n for name, c in new_dependencies.items():\n for error in self.generateRecursive(c, all_components, os.path.join(builddir, name), processed_components):\n yield error\n\n def checkStandardSourceDir(self, dirname, component):\n err = validate.sourceDirValidationError(dirname, component.getName())\n if err:\n logger.warn(err)\n\n def generate(self, builddir, component, active_dependencies, immediate_dependencies, all_dependencies, toplevel):\n ''' active_dependencies is the dictionary of components that need to be\n built for this component, but will not already have been built for\n another component.\n '''\n\n include_own_dir = string.Template(\n 'include_directories(\"$path\")\\n'\n ).substitute(path=component.path)\n\n include_root_dirs = ''\n include_sys_dirs = ''\n include_other_dirs = ''\n objc_flags_set = {}\n objc_flags = []\n for name, c in all_dependencies.items():\n include_root_dirs += string.Template(\n 'include_directories(\"$path\")\\n'\n ).substitute(path=c.path)\n dep_sys_include_dirs = c.getExtraSysIncludes()\n for d in dep_sys_include_dirs:\n include_sys_dirs += string.Template(\n 'include_directories(SYSTEM \"$path\")\\n'\n ).substitute(path=os.path.join(c.path, d))\n dep_extra_include_dirs = c.getExtraIncludes()\n for d in dep_extra_include_dirs:\n include_other_dirs += string.Template(\n 'include_directories(\"$path\")\\n'\n ).substitute(path=os.path.join(c.path, d))\n for name, c in all_dependencies.items() + [(component.getName(), component)]:\n dep_extra_objc_flags = c.getExtraObjcFlags()\n # Try to warn Geraint when flags are clobbered. 
This will probably\n # miss some obscure flag forms, but it tries pretty hard\n for f in dep_extra_objc_flags:\n flag_name = None\n if len(f.split('=')) == 2:\n flag_name = f.split('=')[0]\n elif f.startswith('-fno-'):\n flag_name = f[5:]\n elif f.startswith('-fno'):\n flag_name = f[4:]\n elif f.startswith('-f'):\n flag_name = f[2:]\n if flag_name is not None:\n if flag_name in objc_flags_set and objc_flags_set[flag_name] != name:\n logger.warning(\n 'component %s Objective-C flag \"%s\" clobbers a value earlier set by component %s' % (\n name, f, objc_flags_set[flag_name]\n ))\n objc_flags_set[flag_name] = name\n objc_flags.append(f)\n set_objc_flags = ' '.join(objc_flags)\n\n add_depend_subdirs = ''\n for name, c in active_dependencies.items():\n add_depend_subdirs += string.Template(\n 'add_subdirectory(\"$working_dir/$component_name\")\\n'\n ).substitute(\n working_dir=builddir,\n component_name=name\n )\n \n binary_subdirs = {os.path.normpath(x) : y for x,y in component.getBinaries().items()};\n manual_subdirs = []\n autogen_subdirs = []\n for f in os.listdir(component.path):\n if f in Ignore_Subdirs or f.startswith('.') or f.startswith('_'):\n continue\n if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):\n self.checkStandardSourceDir(f, component)\n # if the subdirectory has a CMakeLists.txt in it, then use that\n manual_subdirs.append(f)\n elif f in ('source', 'test') or os.path.normpath(f) in binary_subdirs:\n # otherwise, if the directory has source files, generate a\n # CMakeLists in the corresponding temporary directory, and add\n # that.\n # For now we only do this for the source and test directories -\n # in theory we could do others\n sources = self.containsSourceFiles(os.path.join(component.path, f))\n if sources:\n autogen_subdirs.append((f, sources))\n elif f.lower() in ('source', 'src', 'test'):\n self.checkStandardSourceDir(f, component)\n\n add_own_subdirs = ''\n for f in manual_subdirs:\n if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):\n add_own_subdirs += string.Template(\n '''add_subdirectory(\n \"$component_source_dir/$subdir_name\"\n \"$working_dir/$subdir_name\"\n)\n'''\n ).substitute(\n component_source_dir = component.path,\n working_dir = builddir,\n subdir_name = f\n )\n\n # names of all directories at this level with stuff in: used to figure\n # out what to link automatically\n all_subdirs = manual_subdirs + [x[0] for x in autogen_subdirs]\n for f, source_files in autogen_subdirs:\n if f in binary_subdirs:\n exe_name = binary_subdirs[f]\n else:\n exe_name = None\n self.generateSubDirList(builddir, f, source_files, component, all_subdirs, immediate_dependencies, exe_name);\n add_own_subdirs += string.Template(\n '''add_subdirectory(\n \"$working_dir/$subdir_name\"\n \"$working_dir/$subdir_name\"\n)\n'''\n ).substitute(\n working_dir = builddir,\n subdir_name = f\n )\n\n \n def sanitizeTarget(t):\n return t.replace('-', '_').upper()\n\n target_definitions = '-DTARGET=' + sanitizeTarget(self.target.getName()) + ' '\n set_targets_like = 'set(TARGET_LIKE_' + sanitizeTarget(self.target.getName()) + ' TRUE)\\n'\n for target in self.target.dependencyResolutionOrder():\n if '*' not in target:\n target_definitions += '-DTARGET_LIKE_' + sanitizeTarget(target) + ' '\n set_targets_like += 'set(TARGET_LIKE_' + sanitizeTarget(target) + ' TRUE)\\n'\n\n\n file_contents = str(Cheetah.Template.Template(CMakeLists_Template, searchList=[{\n \"toplevel\": toplevel,\n \"target_name\": self.target.getName(),\n \"set_targets_like\": 
set_targets_like,\n \"toolchain_file\": self.target.getToolchainFile(),\n \"component_name\": component.getName(),\n \"include_own_dir\": include_own_dir,\n \"include_root_dirs\": include_root_dirs,\n \"include_sys_dirs\": include_sys_dirs,\n \"include_other_dirs\": include_other_dirs,\n \"set_objc_flags\": set_objc_flags,\n \"add_depend_subdirs\": add_depend_subdirs,\n \"add_own_subdirs\": add_own_subdirs,\n \"yotta_target_definitions\": target_definitions,\n \"component_version\": component.getVersion()\n }]))\n fsutils.mkDirP(builddir)\n fname = os.path.join(builddir, 'CMakeLists.txt')\n self.writeIfDifferent(fname, file_contents)\n\n def writeIfDifferent(self, fname, contents):\n try:\n with open(fname, \"r+\") as f:\n current_contents = f.read()\n if current_contents != contents: \n f.seek(0)\n f.write(contents)\n f.truncate()\n except IOError:\n with open(fname, \"w\") as f:\n f.write(contents)\n\n\n def generateSubDirList(self, builddir, dirname, source_files, component, all_subdirs, immediate_dependencies, executable_name):\n logger.debug('generate CMakeLists.txt for directory: %s' % os.path.join(component.path, dirname))\n\n link_dependencies = [x for x in immediate_dependencies]\n fname = os.path.join(builddir, dirname, 'CMakeLists.txt') \n\n # if the directory name is 'test' then, then generate multiple\n # independent executable targets:\n if dirname == 'test':\n tests = []\n for f in source_files:\n object_name = component.getName() + '-' + os.path.basename(os.path.splitext(str(f))[0]).lower()\n tests.append([[str(f)], object_name, [f.lang]])\n\n # link tests against the main executable\n link_dependencies.append(component.getName())\n file_contents = str(Cheetah.Template.Template(Test_CMakeLists_Template, searchList=[{\n 'source_directory':os.path.join(component.path, dirname),\n 'tests':tests,\n 'link_dependencies':link_dependencies\n }]))\n elif dirname == 'source' or executable_name:\n if executable_name:\n object_name = executable_name\n executable = True\n else:\n object_name = component.getName()\n executable = False\n # if we're building the main library, or an executable for this\n # component, then we should link against all the other directories\n # containing cmakelists:\n link_dependencies += [x for x in all_subdirs if x not in ('source', 'test', dirname)]\n \n file_contents = str(Cheetah.Template.Template(Subdir_CMakeLists_Template, searchList=[{\n 'source_directory':os.path.join(component.path, dirname),\n 'executable':executable,\n 'file_names':[str(f) for f in source_files],\n 'object_name':object_name,\n 'link_dependencies':link_dependencies,\n 'languages':set(f.lang for f in source_files)\n }]))\n else:\n raise Exception('auto CMakeLists for non-source/test directories is not supported')\n fsutils.mkDirP(os.path.join(builddir, dirname))\n self.writeIfDifferent(fname, file_contents);\n\n\n def containsSourceFiles(self, directory):\n c_exts = set(('.c',))\n cpp_exts = set(('.cpp','.cc','.cxx'))\n objc_exts = set(('.m', '.mm'))\n \n sources = []\n for root, dires, files in os.walk(directory):\n for f in files:\n name, ext = os.path.splitext(f)\n ext = ext.lower()\n if ext in c_exts:\n sources.append(SourceFile(os.path.join(root, f), 'c'))\n elif ext in cpp_exts:\n sources.append(SourceFile(os.path.join(root, f), 'cpp'))\n elif ext in objc_exts:\n sources.append(SourceFile(os.path.join(root, f), 'objc'))\n return sources\n", "sub_path": "yotta/lib/cmakegen.py", "file_name": "cmakegen.py", "file_ext": "py", "file_size_in_byte": 17980, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "validate.sourceDirValidationError", "line_number": 219, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 229, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 239, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "string.Template", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "string.Template", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 309, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 310, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 331, "usage_type": "call"}, {"api_name": "Cheetah.Template.Template.Template", "line_number": 354, "usage_type": "call"}, {"api_name": "Cheetah.Template.Template", "line_number": 354, "usage_type": "attribute"}, {"api_name": "Cheetah.Template", "line_number": 354, "usage_type": "name"}, {"api_name": "fsutils.mkDirP", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path", "line_number": 371, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path", "line_number": 388, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 391, "usage_type": "call"}, {"api_name": "os.path", "line_number": 391, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 398, "usage_type": "call"}, {"api_name": "os.path", "line_number": 398, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 398, "usage_type": "call"}, {"api_name": "Cheetah.Template.Template.Template", "line_number": 403, "usage_type": "call"}, {"api_name": "Cheetah.Template.Template", "line_number": 403, "usage_type": "attribute"}, {"api_name": "Cheetah.Template", "line_number": 403, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 404, "usage_type": "call"}, {"api_name": "os.path", "line_number": 404, "usage_type": "attribute"}, 
{"api_name": "Cheetah.Template.Template.Template", "line_number": 420, "usage_type": "call"}, {"api_name": "Cheetah.Template.Template", "line_number": 420, "usage_type": "attribute"}, {"api_name": "Cheetah.Template", "line_number": 420, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path", "line_number": 421, "usage_type": "attribute"}, {"api_name": "fsutils.mkDirP", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path", "line_number": 430, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 445, "usage_type": "call"}, {"api_name": "os.path", "line_number": 445, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 447, "usage_type": "call"}, {"api_name": "os.path", "line_number": 447, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}]} +{"seq_id": "196650158", "text": "from django.conf.urls import include, url\nfrom rest_framework import routers\nfrom views import OwnerAPIView, DogAPIView, CatAPIView\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'owner', OwnerAPIView)\nrouter.register(r'dog', DogAPIView)\nrouter.register(r'cat', CatAPIView)\n\nurlpatterns = [\n url('^', include(router.urls))\n]\n", "sub_path": "app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 6, "usage_type": "name"}, {"api_name": "views.OwnerAPIView", "line_number": 7, "usage_type": "argument"}, {"api_name": "views.DogAPIView", "line_number": 8, "usage_type": "argument"}, {"api_name": "views.CatAPIView", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "282300902", "text": "import time\nfrom core.helpers import try_convert\nfrom plex.media_server import PlexMediaServer\nfrom pts.activity import ActivityMethod, PlexActivity\nfrom pts.scrobbler_websocket import WebSocketScrobbler\nimport websocket\n\n\nclass WebSocket(ActivityMethod):\n name = 'WebSocket'\n\n opcode_data = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)\n\n def __init__(self, now_playing):\n super(WebSocket, self).__init__(now_playing)\n\n self.ws = None\n self.reconnects = 0\n\n self.scrobbler = WebSocketScrobbler()\n\n @classmethod\n def test(cls):\n if PlexMediaServer.request('status/sessions', catch_exceptions=True) is None:\n Log.Info(\"Error while retrieving sessions, assuming WebSocket method isn't available\")\n return False\n\n server_info = PlexMediaServer.request(catch_exceptions=True)\n if not server_info:\n Log.Info('Error while retrieving server info for testing')\n return False\n\n multi_user = bool(server_info.get('multiuser', 0))\n if not multi_user:\n Log.Info(\"Server info indicates multi-user support isn't available, WebSocket method not 
available\")\n return False\n\n return True\n\n def connect(self):\n self.ws = websocket.create_connection('ws://localhost:32400/:/websockets/notifications')\n\n def run(self):\n self.connect()\n\n while True:\n try:\n self.process(*self.receive())\n\n # successfully received data, reset reconnects counter\n self.reconnects = 0\n\n except websocket.WebSocketConnectionClosedException:\n if self.reconnects <= 5:\n self.reconnects = self.reconnects + 1\n\n # Increasing sleep interval between reconnections\n if self.reconnects > 1:\n time.sleep(2 * (self.reconnects - 1))\n\n Log.Info('WebSocket connection has closed, reconnecting...')\n self.connect()\n else:\n Log.Error('WebSocket connection unavailable, activity monitoring not available')\n break\n\n def receive(self):\n frame = self.ws.recv_frame()\n\n if not frame:\n raise websocket.WebSocketException(\"Not a valid frame %s\" % frame)\n elif frame.opcode in self.opcode_data:\n return frame.opcode, frame.data\n elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:\n self.ws.send_close()\n return frame.opcode, None\n elif frame.opcode == websocket.ABNF.OPCODE_PING:\n self.ws.pong(\"Hi!\")\n\n return None, None\n\n def process(self, opcode, data):\n if opcode not in self.opcode_data:\n return\n\n info = JSON.ObjectFromString(data)\n item = info['_children'][0]\n\n if info['type'] == \"playing\" and Dict[\"scrobble\"]:\n session_key = str(item['sessionKey'])\n state = str(item['state'])\n view_offset = try_convert(item['viewOffset'], int)\n\n self.scrobbler.update(session_key, state, view_offset)\n\n if info['type'] == \"timeline\" and Dict['new_sync_collection']:\n if item['type'] not in [1, 4]:\n return\n\n if item['state'] == 0:\n Log.Info(\"New File added to Libray: \" + item['title'] + ' - ' + str(item['itemID']))\n\n self.update_collection(item['itemID'], 'add')\n\nPlexActivity.register(WebSocket, weight=10)\n", "sub_path": "NextPVR.bundle/Contents/Code/pts/activity_websocket.py", "file_name": "activity_websocket.py", "file_ext": "py", "file_size_in_byte": 3540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pts.activity.ActivityMethod", "line_number": 9, "usage_type": "name"}, {"api_name": "websocket.ABNF", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pts.scrobbler_websocket.WebSocketScrobbler", "line_number": 20, "usage_type": "call"}, {"api_name": "plex.media_server.PlexMediaServer.request", "line_number": 24, "usage_type": "call"}, {"api_name": "plex.media_server.PlexMediaServer", "line_number": 24, "usage_type": "name"}, {"api_name": "plex.media_server.PlexMediaServer.request", "line_number": 28, "usage_type": "call"}, {"api_name": "plex.media_server.PlexMediaServer", "line_number": 28, "usage_type": "name"}, {"api_name": "websocket.create_connection", "line_number": 41, "usage_type": "call"}, {"api_name": "websocket.WebSocketConnectionClosedException", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "websocket.WebSocketException", "line_number": 71, "usage_type": "call"}, {"api_name": "websocket.ABNF", "line_number": 74, "usage_type": "attribute"}, {"api_name": "websocket.ABNF", "line_number": 77, "usage_type": "attribute"}, {"api_name": "core.helpers.try_convert", "line_number": 92, "usage_type": "call"}, {"api_name": "pts.activity.PlexActivity.register", "line_number": 105, "usage_type": "call"}, {"api_name": "pts.activity.PlexActivity", "line_number": 105, 
"usage_type": "name"}]} +{"seq_id": "502745484", "text": "import numpy as np\nimport rospy\nimport utils\nfrom std_msgs.msg import String\nimport time\nfrom sensor_msgs.msg import JointState\n\nrospy.init_node(\"track_ik\")\n\n# Define Dmp and track_ik instance from utils\ndmp = utils.DMP()\ninitial_pose = np.loadtxt(\"initial_pose_ur10.txt\",delimiter= ',')\ninitial_joints = np.loadtxt(\"initial_joints_ur10.txt\",delimiter= ',')\nrobot = utils.move()\nrobot.ref_pos = initial_joints[0]\nrobot.bin_pos = initial_joints[2]\n\nref_pose = initial_pose[0]\ntarget_pose = initial_pose[1]\n\nprint(initial_pose)\n\n# robot.ref_pos = ref_pose\n# robot.bin_pos = bin_pose\n# dmp.y0 = ref_pose # DMP Start: Starting end-effector position of manipulator\n# dmp.G = target_pose # Target start location\nrobot.move_view()\nwhile not rospy.is_shutdown():\n\t# robot.move_view()\n\tch = raw_input(\"Continue ? (y/n)\\n\")\n\tif (ch == 'y'):\n\t\tpass\n\telse:\n\t\tprint(\"Good Bye\\n\")\n\t\tbreak;\n\ty_track,d_track = dmp.step(target_pose[:3], ref_pose[:3],target_pose[3:])\n\t# print(target_pose, ref_pose)\n\trobot.move_servoj(y_track,d_track)\n\trobot.move_bin()\n\trobot.move_view()\n\t\n", "sub_path": "src/track_ik/trac_ik_python/scripts/ur_trakik.py", "file_name": "ur_trakik.py", "file_ext": "py", "file_size_in_byte": 1058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rospy.init_node", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.DMP", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.move", "line_number": 14, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "329600275", "text": "import ply.lex as lex\nimport re\nimport codecs\nimport os\nimport sys\nfrom os import remove\n\narchivo = open (\"tokens.txt\", \"w\")\nerrores = open (\"errores.txt\", \"w\")\nts= open(\"tabla_simbolos.txt\",\"w\")\ntslocal= open(\"tslocal.txt\",\"w\")\n\ntablaSimbolos= {}\ntablaSimboloslocal={}\nnumID=1\nnumIDlocal=1\ndec = False #saber si estamos en modo declaracion \nfunc= False #si estamos declarando una funcion \nfuncargs= False \ntLocal= False\n\ntipo= 'entero'\ndesplaz=0 \t\t#nota- hacerlo en una clase el desplazamiento \nndesplaz=0\ndesplazLocal=0\nndesplazLocal=0\nnTabla=1\n\ntipoparam=[]\nnombrefun=None\ntiporetorno=None\nnparam=0\n\n\nts.write(\"TABLA PRINCIPAL #\"+ str(nTabla)+ \":\" + '\\n')\nnTabla+=1\n\nreservadas = ['BOOLEAN', 'FUNCTION', 'IF', 'INPUT', 'INT', 'PRINT', 'RETURN', 'STRING', \n\t\t\t\t'VAR', 'WHILE'\n\t\t]\n\ntokens = reservadas+['entero', 'cadena', 'id', 'asig', 'igual', 'coma', 'puntcoma', 'parentA', \n\t\t\t\t'parentC', 'corcheteA', 'corcheteC', 'suma', 'or', 'menor'\n\t\t]\n\n\nt_ignore = ' \\t\\r'\nt_asig= r'&='\nt_igual=r'='\nt_coma=r','\n\nt_parentC=r'\\)'\n\nt_suma=r'\\+'\nt_or=r'\\|\\|'\nt_menor= r'<'\n\ndef t_puntcoma(t):\n\tr';'\n\tdeclaracion= False\n\treturn t\n\ndef t_parentA(t):\n\tr'\\('\n\tglobal func, nTabla, tLocal, funcargs, nombrefun, ndesplazLocal, desplazLocal\n\tif func== True:\n\t\ttLocal=True\n\t\ttslocal.write(\"\\n TABLA de la FUNCION \"+ str(nombrefun) +\" #\"+ str(nTabla)+ \":\" + '\\n')\n\t\tnombrefun=None\n\t\tnTabla+=1\n\t\tfunc=False\n\t\tfuncargs=True\n\treturn t\ndef t_corcheteA(t):\n\tr'\\{'\n\tglobal funcargs\n\tfuncargs= False\n\treturn t\ndef 
t_corcheteC(t):\n\tr'\\}'\n\tglobal tLocal, nparam, tiporetorno, numIDlocal, ndesplazLocal, desplazLocal\n\tif tLocal==True: # if we are inside a function declaration, leave it and return to the global symbol table\n\t\t# add the function's information\n\t\tts.write(\"\\t + numParam: \"+ str(nparam) + '\\n')\n\t\tfor i in range(len(tipoparam)):\n\t\t\tts.write(\"\\t + TipoParam\"+str(i+1)+\": \"+ \"'\"+ str(tipoparam[i])+ \"'\"+ '\\n')\n\t\tts.write(\"\\t + tiporetorno: \"+ \"'\"+str(tiporetorno)+ \"'\" + '\\n')\n\t\ttLocal=False\n\t\ttiporetorno=None\n\t\tdel tipoparam [:]\n\t\ttablaSimboloslocal.clear()\n\t\tnumIDlocal=1\n\t\tndesplazLocal=0\n\t\tdesplazLocal=0\n\t\tnparam=0\n\treturn t\n\ndef t_id(t):\n\tr'[a-zA-Z_][a-zA-Z0-9_]*'\n\tglobal dec, ndesplaz,numID, desplaz, tipo, func, tiporetorno, nparam, tipoparam, nombrefun, funcargs, tLocal, ndesplazLocal, desplazLocal\n\tif t.value.upper() in reservadas:\n\t\tt.value = t.value.upper()\n\t\tif (t.value == 'VAR'): # we enter declaration mode\n\t\t\tdec =True\n\t\tif (dec == True or funcargs== True):\n\t\t\tif (t.value == 'BOOLEAN'):\n\t\t\t\ttipo ='logico'\n\n\t\t\telif(t.value== 'STRING'):\n\t\t\t\ttipo= 'cadena'\n\t\t\t\t\n\t\t\tif (func==False):\n\t\t\t\tif (t.value== 'BOOLEAN' or t.value =='INT'):\n\t\t\t\t\tif tLocal==True: \n\t\t\t\t\t\tndesplazLocal+=2\n\t\t\t\t\telse: \n\t\t\t\t\t\tndesplaz+=2\n\t\t\t\tif (t.value== 'STRING'):\n\t\t\t\t\tif tLocal==True: \n\t\t\t\t\t\tndesplazLocal+=128\n\t\t\t\t\telse: \n\t\t\t\t\t\tndesplaz+=128\n\t\tif ( func==True):\n\t\t\tif (t.value == 'BOOLEAN'):\n\t\t\t\ttiporetorno ='logico'\n\t\t\telif(t.value== 'STRING'):\n\t\t\t\ttiporetorno= 'cadena'\n\t\t\telif(t.value=='INT'):\n\t\t\t\ttiporetorno= 'entero'\n\t\tif (funcargs==True ):\n\t\t\ttipoparam.append(tipo)\n\t\t\tnparam+=1\n\t\tif (t.value=='FUNCTION'):\n\t\t\tfunc=True\n\t\t\ttipo= 'funcion'\n\n\t\tt.type = t.value\n\telse:\n\t\tif tLocal==True: # we are inside a function declaration\n\t\t\tif (tablaSimboloslocal.get(t.value)==None): # the variable is not in the local table\n\t\t\t\tif (tablaSimbolos.get(t.value)==None): # check the global table; if it is not there, add it to the local one\n\t\t\t\t\tglobal numIDlocal\n\t\t\t\t\ttablaSimboloslocal[t.value]=numIDlocal\n\t\t\t\t\tval=t.value\n\t\t\t\t\tt.value=numIDlocal\n\t\t\t\t\tnumIDlocal+=1\n\t\t\t\t\ttslocal.write(\"* LEXEMA: \"+ \"'\"+str(val)+\"'\" + '\\n' + \n\t\t\t\t\t\t\"\\t +tipo: \" + \"'\"+ str(tipo)+\"'\"+ '\\n'+\n\t\t\t\t\t\t\"\\t +despl: \"+ str(desplazLocal) +'\\n' )\n\t\t\t\t\tdesplazLocal= ndesplazLocal\n\n\n\t\t\t\telse:\n\t\t\t\t\tt.value=tablaSimbolos.get(t.value)\n\t\t\telse:\n\t\t\t\tt.value=tablaSimboloslocal.get(t.value)\n\n\t\t\t\n\t\telse: # we are working with the global table\n\t\t\tif (func==True):\n\t\t\t\tnombrefun = t.value\n\t\t\tif (tablaSimbolos.get(t.value)==None):\n\t\t\t\ttablaSimbolos[t.value]=numID\n\t\t\t\tval=t.value\n\t\t\t\tt.value=numID\n\t\t\t\tnumID+=1\n\t\t\t\tif (func == True):\n\t\t\t\t\tts.write(\"* LEXEMA: \"+ \"'\"+str(val)+ \"'\"+'\\n' + \n\t\t\t\t\t\t\"\\t +tipo: \" +\"'\"+ str(tipo)+\"'\"+ '\\n' )\n\t\t\t\telse:\n\t\t\t\t\tts.write(\"* LEXEMA: \"+ \"'\"+str(val)+ \"'\" +'\\n' + \n\t\t\t\t\t\t\"\\t +tipo:\" + \"'\"+ str(tipo)+\"'\"+ '\\n'+\n\t\t\t\t\t\t\"\\t +despl: \"+ str(desplaz) +'\\n' )\n\t\t\t\tdesplaz=ndesplaz\n\t\t\t\tdec= False # reset to the default values\n\t\t\t\ttipo='entero' \n\t\t\telse:\n\t\t\t\tt.value=tablaSimbolos.get(t.value)\n\t\t\t\n\treturn t\n\ndef t_newline(t):\n\tr'\\n+'\n\tt.lexer.lineno += 
len(t.value)\n\t\n\n\ndef t_entero(t):\n\tr'\\d+'\n\tt.value = int(t.value)\n\tif (t.value<=32767):\n\t\treturn t\n\telse:\n\t\terrores.write(\"Error del analizador lexico en la linea:\" + str(t.lexer.lineno) + \" Entero mayor de 32767 \" + \"\\n\")\n\n\ndef t_comentario(t):\n\tr'/\\*(.|\\n)*?\\*/'\n\tt.lexer.lineno += t.value.count('\\n')\n\tpass\n\ndef t_cadena(t):\n\tr'(\"[^\"]*\")'\n\tif (len(t.value)-2<64):\n\t\treturn t\n\telse:\n\t\terrores.write(\"Error del analizador lexico en la linea:\" + str(t.lexer.lineno) + \" Cadena de longidud incorrecta \" + \"\\n\")\n\n\ndef t_error(t):\n\t#print \"caracter ilegal '%s'\" % t.value[0]\n\t#print \"linea '%s'\" % t.lexer.lineno\n\terrores.write(\"Error del analizador lexico en la linea:\" + str(t.lexer.lineno) + \" Token ilegal: \" + str(t.value[0]) + \"\\n\")\n\tt.lexer.skip(1)\n\nif __name__=='__main__':\n\tif len (sys.argv) != 2 :\n\t\tprint (\"Hay que pasar 1 solo archivo\")\n\n\ttest = sys.argv[1]\n\n\t#test='/Users/Loreto/Documents/Universidad /Procesadores de lenguajes/Prueba/p.txt'\n\tfp = codecs.open(test,\"r\",\"utf-8\")\n\tcadena = fp.read()\n\tfp.close()\n\n\tanalizador = lex.lex()\n\tanalizador.input(cadena)\n\n\n\twhile True:\n\t\ttok = analizador.token()\n\t\tif not tok : break\n\t\tif (tok.type=='entero' or tok.type=='cadena' or tok.type=='id'):\n\t\t\t#print (\"<\"+str(tok.type)+\",\"+str(tok.value)+\">\"+\"\\n\")\n\t\t\tarchivo.write(\"<\"+str(tok.type)+\",\"+str(tok.value)+\">\"+\"\\n\")\n\t\telse:\n\t\t\t#print (\"<\"+str(tok.type)+\",\"+ \" >\"+\"\\n\")\n\t\t\tarchivo.write(\"<\"+str(tok.type)+\",\"+ \" >\"+\"\\n\")\n\n\t\t\n\n\ttslocal.close()\n\ttslocal= open(\"tslocal.txt\",\"r\") # concatenate the file holding the local symbol tables onto the global one, then delete the local file\n\tfor i in tslocal:\n\t\tts.write(i)\n\ttslocal.close()\n\tremove(\"tslocal.txt\")\n\tarchivo.close()\n\terrores.close()\n\tts.close()", "sub_path": "prueba.py", "file_name": "prueba.py", "file_ext": "py", "file_size_in_byte": 6227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.argv", "line_number": 217, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 220, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 223, "usage_type": "call"}, {"api_name": "ply.lex.lex", "line_number": 227, "usage_type": "call"}, {"api_name": "ply.lex", "line_number": 227, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 247, "usage_type": "call"}]} +{"seq_id": "303732170", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport bs4\nfrom ..items import MyspiderItem\n\n\nclass DoubanMovieSpider(scrapy.Spider):\n name = 'douban_movie'\n allowed_domains = ['movie.douban.com']\n start_urls = ['https://movie.douban.com/chart']\n\n def parse(self, response):\n soup = bs4.BeautifulSoup(response.text, 'html.parser')\n elements = soup.find_all('div', class_='pl2')\n for element in elements:\n item = MyspiderItem()\n item['name'] = element.find('a').text.replace(' ', '').replace('\\n', '')\n item['url'] = element.find('a')['href']\n item['information'] = element.find('p', class_='pl').text.replace(' ', '').replace('\\n', '')\n item['rating'] = element.find('div', class_='star clearfix').text.replace(' ', '').replace('\\n', '')\n yield item\n", "sub_path": "08-About_scrapy/mySpider/mySpider/spiders/douban_movie.py", "file_name": "douban_movie.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 7, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "items.MyspiderItem", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "273227078", "text": "import urllib.request\nimport xml.etree.ElementTree as ET\n\nurl = \"http://py4e-data.dr-chuck.net/comments_716295.xml\"\nuh = urllib.request.urlopen(url)\ndata = uh.read()\n\ntree = ET.fromstring(data)\nresults = tree.findall('comments/comment')\ncount = 0\nsum = 0\n\nfor item in results:\n x = int(item.find('count').text)\n count = count + 1\n sum = sum + x\n\nprint (\"Sum: \",sum)\nprint (\"Count:\",count)\n", "sub_path": "urlparse.py", "file_name": "urlparse.py", "file_ext": "py", "file_size_in_byte": 398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 5, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 5, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 5, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 8, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "568677839", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('create/', views.create, name='create'),\n path('add/', views.Add, name='Add'),\n path('blogs/', views.blogs, name='blogs'),\n path('/', views.detail, name='detail'),\n]", "sub_path": "Blog_App/Blogs/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "600763111", "text": "# Jmthon userbot \n#Jmthon string session \nimport os\nfrom time import sleep\n\nprint(\"\")\na = r\"\"\"\n© JMTHON-USERBOT ©\n\n╋┏┓╋╋┏┓┏┓\n╋┃┣━━┫┗┫┗┳━┳━┳┓\n┏┫┃┃┃┃┏┫┃┃╋┃┃┃┃\n┗━┻┻┻┻━┻┻┻━┻┻━┛\n • t.me/RR7PP\n \n ~ Jmthon UserBot\n\"\"\"\n\n\ndef spinner():\n print(\"نﻮﺜﻴﻠﻴﺘﻟﺍ ﺔﺒﺘﻜﻣ ﻦﻣ ﺪﻛﺄﺘﻟﺍ \")\n for _ in range(3):\n for frame in r\"-\\|/-\\|/\":\n print(\"\\b\", frame, sep=\"\", end=\"\", flush=True)\n sleep(0.1)\n\n\ndef clear_screen():\n if os.name == \"posix\":\n os.system(\"clear\")\n else:\n os.system(\"cls\")\n\n\ndef get_api_id_and_hash():\n print(\n \"ﻦﻣ يﺪﻳﺍ ﺐﻳﻻﺍﻭ شﺎﻫ ﻲﺒﻳﻻﺍ ﻰﻠﻋ ﻞﺼﺣﺍ my.telegram.org @Jmthon \\n\\n\",\n )\n try:\n API_ID = int(input(\"API_ID : \"))\n except ValueError:\n print(\"ﺢﻴﺤﺻ ﻞﻜﺸﺑ يﺪﻳﺍ ﻲﺒﻳﻻﺍ ﺔﻤﻴﻗ ﻊﺿﻭ ﻰﺟﺮﻳ \")\n exit(0)\n API_HASH = input(\"API_HASH : \")\n return API_ID, API_HASH\n\n\ndef telethon_session():\n try:\n spinner()\n\n x = \"\\bFound an existing installation of Telethon...\\nSuccessfully Imported.\\n\\n\"\n except BaseException:\n print(\"Installing Telethon...\")\n os.system(\"pip install -U telethon\")\n\n x = \"\\bInstalled and imported Telethon\" \n clear_screen()\n print(\"JMTHON STRING SESSION \")\n print(\"T.ME/JMTHON\")\n print(a)\n print(x)\n\n\n from 
telethon.errors.rpcerrorlist import ApiIdInvalidError, PhoneNumberInvalidError\n from telethon.sessions import StringSession\n from telethon.sync import TelegramClient\n\n API_ID, API_HASH = get_api_id_and_hash()\n\n # logging in\n try:\n with TelegramClient(StringSession(), API_ID, API_HASH) as barsha:\n print(\"Creating the Termux code for Jmthon, please wait\")\n adi = barsha.send_message(\n \"me\",\n f\"**Termux code**:\\n\\n`{barsha.session.save()}`\\n\\n**Do not give it to anyone, not even the developers!**\",\n )\n adi.reply(\"Above is your Termux code. Do not share it with anyone, even if they claim to be one of the developers\\n - @JMTHON\")\n print(\n \"The Termux code was created successfully, check your Saved Messages\"\n )\n exit(0)\n except ApiIdInvalidError:\n print(\n \"It looks like the API_ID or API_HASH is wrong, the operation has ended\"\n )\n exit(0)\n except ValueError:\n print(\"The API_ID value must not be empty\")\n exit(0)\n except PhoneNumberInvalidError:\n print(\"Check the phone number and try again, the operation has ended\")\n exit(0)\n\n\ndef main():\n clear_screen()\n print(a)\n telethon_session()\n x = input(\"Run again? (y/n) \")\n if x == \"y\":\n main()\n else:\n exit(0)\n\n\nmain()\n", "sub_path": "session/R7Setup.py", "file_name": "R7Setup.py", "file_ext": "py", "file_size_in_byte": 3274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "os.name", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 30, "usage_type": "call"}, {"api_name": "os.system", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 55, "usage_type": "call"}, {"api_name": "telethon.sync.TelegramClient", "line_number": 73, "usage_type": "call"}, {"api_name": "telethon.sessions.StringSession", "line_number": 73, "usage_type": "call"}, {"api_name": "telethon.errors.rpcerrorlist.ApiIdInvalidError", "line_number": 84, "usage_type": "name"}, {"api_name": "telethon.errors.rpcerrorlist.PhoneNumberInvalidError", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "105947011", "text": "import gensim\nimport pickle\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.optimizers import RMSprop\nimport h5py\nfrom keras.models import model_from_json\n\n\n\n# 1. Build the training and test data\nfr = open('../data/word2vec_train.pkl','rb')\ntrainX = pickle.load(fr)\ntrainY = pickle.load(fr)\nfr.close()\n\n\n\n# Data preprocessing\nX = np.array(trainX)\na,b = X.shape\nY = np.array(trainY)\n\n# Build the neural network\nmodel = Sequential([\n Dense(32, input_dim=300),\n Activation('relu'),\n Dense(6),\n Activation('softmax'),\n])\n\n# Define the optimizer\nrmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\n\n# Compile the model\nmodel.compile(optimizer=rmsprop,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nprint('Training ------------')\n# Train on the data\nmodel.fit(X,Y,epochs=3,batch_size=32)\n\n# Save the model\njson_string = model.to_json()# equivalent to json_string = model.get_config()\nopen('my_model_architecture.json','w').write(json_string)\nmodel.save_weights('my_model_weights.h5')\n\n", "sub_path": "AML/codes/vachor/NNet.py", "file_name": "NNet.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pickle.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "372556756", "text": "\"\"\"\n\"\"\"\nimport itertools\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nfrom tabulate import tabulate\n\nclass SarimaxModel():\n \"\"\"\n \"\"\"\n def __init__(self, ts, best_model=None, best_model_order=None, seasonal_model_order=None):\n self._ts = ts\n self._best_model = best_model\n self._best_model_order = best_model_order\n self._seasonal_model_order = seasonal_model_order\n \n @property\n def ts(self):\n return self._ts\n \n @ts.setter\n def ts(self, value):\n self._ts = value\n \n @property\n def best_model(self):\n return self._best_model\n \n @best_model.setter\n def best_model(self, value):\n self._best_model = value\n \n def generate_parameters_combination(self, max_range, seasonality=12):\n p = d = q = range(max_range)\n iters = list(itertools.product(p, d, q))\n return iters, [(x[0], x[1], x[2], seasonality) for x in iters]\n \n def grid_search(self, pdq, seasonal_pdq):\n aic_val = np.Inf\n out_ord = pdq[0]\n out_seas_ord = seasonal_pdq[0]\n for param in pdq:\n for seasonal_param in seasonal_pdq:\n try:\n model = sm.tsa.statespace.SARIMAX(self.ts,\n order=param,\n seasonal_order=seasonal_param,\n enforce_stationarity=False,\n enforce_invertibility=False)\n results = model.fit()\n res_aic = results.aic\n if res_aic < aic_val:\n best_results = results\n out_ord = param\n out_seas_ord = seasonal_param\n except:\n continue\n return best_results, out_ord, out_seas_ord\n \n def compute_mse(self, predictions, truth):\n return ((predictions - truth) ** 2).mean()\n \n def save_results(self, res_path, result_table):\n with open(res_path, 'w') as f:\n print(tabulate(result_table))\n\n def plot_res_diagnostics(self, res_path):\n pass\n \n \"\"\"\n def plot_res_diagnostics(results):\n results.plot_diagnostics(figsize=(15, 12))\n plt.savefig(os.path.join('res\\\\monza\\\\', ''.join([compound, '_diagnostics'])))\n plt.close()\"\"\"", "sub_path": "env_lab/processing/arima.py", "file_name": "arima.py", "file_ext": "py", "file_size_in_byte": 2484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "itertools.product", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.Inf", "line_number": 41, "usage_type": "attribute"}, {"api_name": "statsmodels.api.tsa.statespace.SARIMAX", "line_number": 47, "usage_type": "call"}, {"api_name": "statsmodels.api.tsa", "line_number": 47, "usage_type": "attribute"}, {"api_name": "statsmodels.api", "line_number": 47, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "287626527", "text": "#coding=utf-8\nimport os\nimport gevent.monkey\ngevent.monkey.patch_all()\nimport multiprocessing\n#https://www.jianshu.com/p/fecf15ad0c9a\n#gunicorn部署Flask服务\ndebug = True\n#日辉输出级别\nloglevel = 'debug'\nbind = '10.0.12.113:3001'\npidfile = 'log/gunicorn.pid'\nlogfile = 
'log/debug.log'\n\n# The worker class used by each worker process; the default is sync, gevent can also be used.\nworker_class = 'gunicorn.workers.ggevent.GeventWorker'\n# Number of worker processes to start\nworkers = multiprocessing.cpu_count() * 2 + 1\nworkers = 5\n# Number of threads\nthreads = 2\n# Maximum allowed size of an HTTP request header field\nlimit_request_field_size=81900\n\n\nx_forwarded_for_header = 'X-FORWARDED-FOR'\n\ndaemon = False# True means run in the background; the default is False", "sub_path": "find_new_words/Find_new_words_Utry/gun.py", "file_name": "gun.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "gevent.monkey.monkey.patch_all", "line_number": 4, "usage_type": "call"}, {"api_name": "gevent.monkey.monkey", "line_number": 4, "usage_type": "attribute"}, {"api_name": "gevent.monkey", "line_number": 4, "usage_type": "name"}, {"api_name": "multiprocessing.cpu_count", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "640882155", "text": "import keras\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array\nfrom keras.applications.mobilenet import preprocess_input\nfrom keras.utils import to_categorical\nfrom PIL import Image\n\n#Data Generator to efficiently load and preprocess data for training the classifier\n\nclass DataGenerator(keras.utils.Sequence):\n 'Generates data for Keras'\n def __init__(self, list_IDs, data, labels, batch_size=32, dim=(224, 224), n_channels=3,\n n_classes=2, shuffle=True):\n 'Initialization'\n self.dim = dim\n self.batch_size = batch_size\n self.data = data\n self.labels = labels\n self.list_IDs = list_IDs\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.shuffle = shuffle\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, self.n_classes))\n \n # printing the progress\n# print(str(int((int(list_IDs_temp[0])/len(self.list_IDs))*100)) + '%')\n\n # Generate data\n for i, ID in enumerate(list_IDs_temp):\n # preprocessing\n img_arr = self.data[ID]\n img = array_to_img(img_arr)\n img = img.resize((224,224), Image.ANTIALIAS)\n img.load()\n \n X[i] = preprocess_input(np.asarray(img, dtype=np.uint8))\n\n # Store target label(one-hot-encoding)\n y[i] = to_categorical(self.labels[str(ID)], num_classes=self.n_classes)\n\n return X, y", "sub_path": "utils/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 2462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "keras.utils", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": 
"numpy.random.shuffle", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.array_to_img", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "keras.applications.mobilenet.preprocess_input", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 65, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "355138144", "text": "import requests\r\n\r\ndef line_send_image2(message, img_url, token = 'rpHUQIIMkArQh6EtQpqfjK6hjPN2jjNxh0zDbcFVoD2'):\r\n url = \"https://notify-api.line.me/api/notify\" # --> 不支援http, 只能用https\r\n headers = {\"Authorization\" : \"Bearer \"+ token}\r\n\r\n payload = {\"message\" : message}\r\n\r\n r = requests.get(img_url)\r\n files = {'imageFile': r.content}\r\n\r\n r = requests.post(url, headers = headers, params = payload, files = files)\r\n\r\nline_send_image2('test', 'https://github.com/maloyang/KH20210925_Python_Data_Science/blob/main/W02/dog.png?raw=true')\r\n", "sub_path": "W02/send.py", "file_name": "send.py", "file_ext": "py", "file_size_in_byte": 569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "168042335", "text": "import discord\nfrom discord.ext import commands\n\n# For regular dictionary.\nfrom wn import WordNet\nfrom wn.info import WordNetInformationContent\nfrom wn.constants import wordnet_30_dir, wordnet_33_dir\n\n# For urban dictionary.\nimport urllib.request, json\n\n# Class which holds the dictionary.\nclass Dictionary(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n # WordNet dictionary.\n @commands.command(help='Searches your word in the dictionary')\n async def define(self, ctx):\n\n # Create instance of wordnet, get the query from context and find synsets via WordNet.\n wordnet = WordNet(wordnet_30_dir)\n command = ctx.message.content.split(' ', 1)\n\n if len(command) > 1:\n query = command[1]\n synset = wordnet.synsets(query)\n \n # String manipulation to get a list of definitions.\n definitions = \"\"\n index = 1\n\n # Formulates the definitions.\n if len(synset) > 0:\n for syn in synset:\n if(query in syn.name()):\n definitions = definitions + f\"{index}. [{self.categorise(syn.name())}] {syn.definition()}\\n\"\n index += 1\n await ctx.message.channel.send(f\"```\\nDefinition of {query}:\\n{definitions}```\")\n else:\n await ctx.message.channel.send(f\"Could not find requested word, doing a secondary search in Urban Dictionary...\")\n await self.udictHelper(ctx) \n else:\n await ctx.message.channel.send(f\"```Usage: !define ```\")\n \n # Categorises word based on the synset name.\n def categorise(self, word):\n if '.n.' in word:\n return \"noun\"\n elif '.a.' in word:\n return \"adj\"\n elif '.v.' in word:\n return \"verb\"\n elif '.r.' 
in word:\n return "adv"\n else:\n return "n/a"\n\n # Urban Dictionary.\n # Gets the definition of the word from Urban Dictionary.\n @commands.command(help='Uses Urban Dictionary to find a word')\n async def udict(self, ctx):\n await self.udictHelper(ctx)\n \n # Actual function which does everything.\n async def udictHelper(self, ctx):\n\n # Sets up variables to use for webscraping.\n command = ctx.message.content.split(' ', 1)\n\n if len(command) > 1:\n query = command[1]\n parseQuery = query.replace(' ', '+')\n url = \"http://api.urbandictionary.com/v0/define?term=\" + parseQuery\n response = urllib.request.urlopen(url)\n data = json.loads(response.read())\n\n # If not empty...\n if len(data.get('list', [])) > 0:\n\n # Sort the data based on thumbs up.\n definitions = sorted(data['list'], key = lambda i : i['thumbs_up'], reverse=True)[0:3]\n block = \"\"\n index = 1\n\n # Organise into the right format.\n for d in definitions:\n definition = self.strip_artefacts(d['definition'])\n if d['example'][-1:] != '\\n':\n block = block + f\"{index}. {definition} ({d['thumbs_up']} thumbs up)\\n\\n{self.strip_artefacts(d['example'])}\\n\\n\"\n else:\n block = block + f\"{index}. {definition} ({d['thumbs_up']} thumbs up)\\n\\n{self.strip_artefacts(d['example'])}\\n\"\n index += 1\n\n message = f\"Searching Urban Dictionary for {query}...\\n```{block}```\"\n\n else:\n message = f\"Could not find requested word on Urban Dictionary.\" \n else:\n message = f\"```Usage: !udict ```\" \n \n await ctx.message.channel.send(message)\n \n # Strips the string.\n def strip_artefacts(self, string):\n\n # Strip the brackets.\n string = string.replace('[', '')\n string = string.replace(']', '')\n \n # In case of doubles...\n string = string.replace('\\r\\n\\r\\n', '\\r\\n')\n return string", "sub_path": "modules/dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 4089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "wn.WordNet", "line_number": 23, "usage_type": "call"}, {"api_name": "wn.constants.wordnet_30_dir", "line_number": 23, "usage_type": "argument"}, {"api_name": "discord.ext.commands.command", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 19, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 62, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 76, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 76, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 76, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "471110594", "text": "from datetime import date\nfrom dateutil.relativedelta import relativedelta\n\n\nclass TimeAnalyzer:\n\n TWENTY_YEARS_AGO = date.today() + relativedelta(years=-20)\n TWENTY_YEARS = 20\n TEN_YEARS = 10\n THREE_YEARS = 3\n\n @staticmethod\n def evaluate(expunger):\n if expunger.most_recent_conviction:\n elig_date = TimeAnalyzer._calc_elig_date(expunger.most_recent_conviction, TimeAnalyzer.TEN_YEARS)\n TimeAnalyzer._mark_as_time_ineligible(expunger.charges, 'Time-ineligible under 137.225(7)(b)', elig_date)\n 
TimeAnalyzer._check_mrc_time_eligibility(expunger)\n elif expunger.most_recent_dismissal:\n TimeAnalyzer._mark_all_acquittals_ineligible_using_mrd_date(expunger)\n TimeAnalyzer._mark_as_time_eligible(expunger.most_recent_dismissal.case()().charges)\n TimeAnalyzer._mark_as_time_eligible(expunger.convictions)\n else:\n TimeAnalyzer._mark_as_time_eligible(expunger.charges)\n\n TimeAnalyzer._evaluate_class_b_felonies(expunger)\n\n @staticmethod\n def _check_mrc_time_eligibility(expunger):\n eligibility_date = TimeAnalyzer._calc_furthest_out_elig_date(expunger)\n if expunger.second_most_recent_conviction:\n expunger.most_recent_conviction.set_time_ineligible('Multiple convictions within last ten years', eligibility_date)\n elif TimeAnalyzer._most_recent_conviction_is_greater_than_three_years_old(expunger):\n expunger.most_recent_conviction.set_time_eligible()\n else:\n expunger.most_recent_conviction.set_time_ineligible('Most recent conviction is less than three years old', eligibility_date)\n\n @staticmethod\n def _calc_furthest_out_elig_date(expunger):\n if expunger.second_most_recent_conviction:\n date_1 = TimeAnalyzer._calc_elig_date(expunger.second_most_recent_conviction, TimeAnalyzer.TEN_YEARS)\n date_2 = TimeAnalyzer._calc_elig_date(expunger.most_recent_conviction, TimeAnalyzer.THREE_YEARS)\n return max(date_1, date_2)\n else:\n return TimeAnalyzer._calc_elig_date(expunger.most_recent_conviction, TimeAnalyzer.THREE_YEARS)\n\n @staticmethod\n def _calc_elig_date(charge, years):\n return charge.disposition.date + relativedelta(years=years)\n\n @staticmethod\n def _most_recent_conviction_is_greater_than_three_years_old(expunger):\n three_years_ago = date.today() + relativedelta(years=-3)\n return expunger.most_recent_conviction.disposition.date <= three_years_ago\n\n @staticmethod\n def _mark_all_acquittals_ineligible_using_mrd_date(expunger):\n eligibility_date = expunger.most_recent_dismissal.date + relativedelta(years=+TimeAnalyzer.THREE_YEARS)\n for charge in expunger.acquittals:\n charge.set_time_ineligible('Recommend sequential expungement', eligibility_date)\n\n @staticmethod\n def _evaluate_class_b_felonies(expunger):\n if expunger.most_recent_charge and expunger.most_recent_charge.disposition.date > TimeAnalyzer.TWENTY_YEARS_AGO:\n for charge in expunger.class_b_felonies:\n charge.set_time_ineligible('Time-ineligible under 137.225(5)(a)(A)(i)',\n expunger.most_recent_charge.disposition.date + relativedelta(\n years=TimeAnalyzer.TWENTY_YEARS))\n\n @staticmethod\n def _mark_as_time_ineligible(charges, reason, eligibility_date):\n for charge in charges:\n charge.set_time_ineligible(reason, eligibility_date)\n\n @staticmethod\n def _mark_as_time_eligible(charges):\n for charge in charges:\n charge.set_time_eligible()\n", "sub_path": "src/backend/expungeservice/expunger/analyzers/time_analyzer.py", "file_name": "time_analyzer.py", "file_ext": "py", "file_size_in_byte": 3699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.date.today", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 7, "usage_type": "name"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 7, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 52, "usage_type": "name"}, {"api_name": 
"dateutil.relativedelta.relativedelta", "line_number": 52, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "535668451", "text": "#---------------#\n#--- imports ---#\n#---------------#\n\nfrom flask import Flask, render_template, request, session, flash, redirect, url_for\nfrom functools import wraps\nfrom forms import AddTaskForm, RegisterForm, LoginForm\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime\n\n\n#--------------#\n#--- config ---#\n#--------------#\n\n\napp = Flask(__name__)\napp.config.from_object('_config')\ndb = SQLAlchemy(app)\n\nfrom models import Task, User\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n#------------------------#\n#--- helper functions ---#\n#------------------------#\n\n\ndef login_required(test):\n @wraps(test)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return test(*args, **kwargs)\n else:\n flash('Önce giriş yapın.')\n return redirect(url_for('login'))\n return wrap\n\n\n#----------------------#\n#--- route handlers ---#\n#----------------------#\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n error = None\n form = LoginForm(request.form)\n if request.method == 'POST':\n if form.validate_on_submit():\n user = User.query.filter_by(name=request.form['name']).first()\n if user is not None and user.password == request.form['password']:\n session['logged_in'] = True\n session['user_id'] = user.id\n flash('Welcome!')\n return redirect(url_for('tasks'))\n else:\n error = 'Kullanıcı adı veya şifre yanlış'\n else:\n error = 'Tüm alanlar zorunludur'\n return render_template('login.html', form=form, error=error)\n\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n session.pop('user_id', None)\n flash('Çıkış yaptınız!')\n return redirect(url_for('tasks'))\n\n\n@app.route('/task/')\n@login_required\ndef tasks():\n open_task = db.session.query(Task).filter_by(status='1').order_by(Task.due_date.asc())\n closed_task = db.session.query(Task).filter_by(status='0').order_by(Task.due_date.asc())\n return render_template('task.html', form=AddTaskForm(request.form), open_task=open_task, closed_task=closed_task)\n\n\n@app.route('/add/', methods=['GET', 'POST'])\n@login_required\ndef new_task():\n form = AddTaskForm(request.form)\n if request.method == 'POST':\n if form.validate_on_submit():\n new_task = Task(form.name.data, form.due_date.data, form.priority.data,\n datetime.datetime.utcnow(), '1', session['user_id'])\n db.session.add(new_task)\n db.session.commit()\n flash('New entry was successfully posted. Thanks.')\n return redirect(url_for('tasks'))\n else:\n flash('Tüm alanlar zorunludur!')\n return redirect(url_for('tasks'))\n return render_template('task.html', form=form)\n\n\n@app.route('/complate//')\n@login_required\ndef complate(task_id):\n new_id = task_id\n db.session.query(Task).filter_by(task_id=new_id).update({\"status\": \"0\"})\n db.session.commit()\n flash('Görev tamamlandı olarak işaretlendi')\n return redirect(url_for('tasks'))\n\n\n@app.route('/delete//')\n@login_required\ndef delete_entry(task_id):\n new_id = task_id\n db.session.query(Task).filter_by(task_id=new_id).delete()\n db.session.commit()\n flash('Görev silindi. 
Neden yeni bir tane eklemiyorsunuz?')\n return redirect(url_for('tasks'))\n\n\n@app.route('/register/', methods=['GET','POST'])\ndef register():\n error = None\n form = RegisterForm(request.form)\n if request.method == 'POST':\n new_user = User(form.name.data, form.email.data, form.password.data)\n db.session.add(new_user)\n db.session.commit()\n flash('Kayıt olduğunuz için teşekkürler. Lütfen giriş yapın.')\n return redirect(url_for('login'))\n return render_template('register.html', form=form, error=error)\n", "sub_path": "views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 29, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "models.User.query.filter_by", "line_number": 50, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 74, "usage_type": "argument"}, {"api_name": "models.Task.due_date.asc", "line_number": 74, "usage_type": "call"}, {"api_name": "models.Task.due_date", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 75, "usage_type": "argument"}, {"api_name": "models.Task.due_date.asc", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Task.due_date", 
"line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 76, "usage_type": "call"}, {"api_name": "forms.AddTaskForm", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "forms.AddTaskForm", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 101, "usage_type": "argument"}, {"api_name": "flask.flash", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Task", "line_number": 111, "usage_type": "argument"}, {"api_name": "flask.flash", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 114, "usage_type": "call"}, {"api_name": "forms.RegisterForm", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "models.User", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "113513209", "text": "# -*- coding: utf-8 -*-\n\n# https://www.pythonsheets.com/notes/python-sqlalchemy.html\n\nfrom sqlalchemy import create_engine, and_, or_\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import sessionmaker\n\n\nif __name__ == '__main__':\n engine = create_engine(\"sqlite:///lab05-ex01.sqlite\")\n Session = sessionmaker(bind=engine)\n session = Session()\n\n Base = automap_base()\n Base.prepare(engine, reflect=True)\n\n Pessoa = Base.classes.Pessoa\n Telefones = Base.classes.Telefones\n lista_de_pessoas = session.query(Pessoa).join(Telefones).all()\n\n for linha in lista_de_pessoas:\n 
print(\"Nome: {}\".format(linha.nome))\n for tel in linha.telefones_collection:\n print(\"Telefone: {}\".format(tel.numero))\n\n pessoas = session.query(Pessoa).all()\n\n for linha in pessoas:\n print('{}\\t{}'.format(linha.idPessoa,linha.nome))\n\n telefones = session.query(Telefones).filter(Telefones.idPessoa == linha.idPessoa)\n for tel in telefones:\n print('{}'.format(tel.numero))\n\n pessoas = session.query(Pessoa).filter(Pessoa.nome.ilike('J%')).all()\n for pessoa in pessoas:\n print('{}\\t{}'.format(pessoa.idPessoa, pessoa.nome))", "sub_path": "exemplo03.py", "file_name": "exemplo03.py", "file_ext": "py", "file_size_in_byte": 1214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.automap.automap_base", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "604835425", "text": "from meuBarFavorito.app import db\nfrom flask import abort, make_response, jsonify\nfrom meuBarFavorito.models.Estabelecimento import Estabelecimento\nfrom meuBarFavorito.models.Evento import Evento\nfrom meuBarFavorito.models.Foto import Foto\nfrom meuBarFavorito.models.Partida import Partida\n\ndef commit():\n try:\n db.session.commit()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef salvar(obj):\n try:\n db.session.add(obj)\n db.session.commit()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef deletar(obj):\n try:\n db.session.delete(obj)\n db.session.commit()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef abortComErro(json, codigo):\n abort(make_response(jsonify(json), codigo))\n\ndef getEstabelecimento(id):\n try:\n return Estabelecimento.query.filter_by(id = id).first()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getEvento(id):\n try:\n return Evento.query.filter_by(id = id).first()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getEventoPorPartida(idPartida):\n try:\n return Evento.query.filter_by(idPartida = idPartida).all()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getFoto(id):\n try:\n return Foto.query.filter_by(id = id).first()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getListaDeFotosDoEstabelecimento(idEstabelecimento):\n try:\n return Foto.query.filter_by(idEstabelecimento = idEstabelecimento).all()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getPartida(id):\n try:\n return Partida.query.filter_by(id = id).first()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)\n\ndef getListaDePartidas():\n try:\n return Partida.query.all()\n except Exception as ex:\n print(ex.args)\n abortComErro({'code': 500, 'body': {'mensagem': 'Erro interno!'}}, 500)", "sub_path": "meuBarFavorito/infraestructure/DbAccess.py", "file_name": "DbAccess.py", "file_ext": "py", 
"file_size_in_byte": 2586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "meuBarFavorito.app.db.session.commit", "line_number": 10, "usage_type": "call"}, {"api_name": "meuBarFavorito.app.db.session", "line_number": 10, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.app.db", "line_number": 10, "usage_type": "name"}, {"api_name": "meuBarFavorito.app.db.session.add", "line_number": 17, "usage_type": "call"}, {"api_name": "meuBarFavorito.app.db.session", "line_number": 17, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.app.db", "line_number": 17, "usage_type": "name"}, {"api_name": "meuBarFavorito.app.db.session.commit", "line_number": 18, "usage_type": "call"}, {"api_name": "meuBarFavorito.app.db.session", "line_number": 18, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.app.db", "line_number": 18, "usage_type": "name"}, {"api_name": "meuBarFavorito.app.db.session.delete", "line_number": 25, "usage_type": "call"}, {"api_name": "meuBarFavorito.app.db.session", "line_number": 25, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.app.db", "line_number": 25, "usage_type": "name"}, {"api_name": "meuBarFavorito.app.db.session.commit", "line_number": 26, "usage_type": "call"}, {"api_name": "meuBarFavorito.app.db.session", "line_number": 26, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.app.db", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 32, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Estabelecimento.Estabelecimento.query.filter_by", "line_number": 36, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Estabelecimento.Estabelecimento.query", "line_number": 36, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Estabelecimento.Estabelecimento", "line_number": 36, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Evento.Evento.query.filter_by", "line_number": 43, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Evento.Evento.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Evento.Evento", "line_number": 43, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Evento.Evento.query.filter_by", "line_number": 50, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Evento.Evento.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Evento.Evento", "line_number": 50, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Foto.Foto.query.filter_by", "line_number": 57, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Foto.Foto.query", "line_number": 57, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Foto.Foto", "line_number": 57, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Foto.Foto.query.filter_by", "line_number": 64, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Foto.Foto.query", "line_number": 64, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Foto.Foto", "line_number": 64, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Partida.Partida.query.filter_by", "line_number": 71, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Partida.Partida.query", "line_number": 71, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Partida.Partida", 
"line_number": 71, "usage_type": "name"}, {"api_name": "meuBarFavorito.models.Partida.Partida.query.all", "line_number": 78, "usage_type": "call"}, {"api_name": "meuBarFavorito.models.Partida.Partida.query", "line_number": 78, "usage_type": "attribute"}, {"api_name": "meuBarFavorito.models.Partida.Partida", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "604271632", "text": "from flask import request, Flask, jsonify\nfrom datetime import datetime, date, timedelta\nimport time\nimport pymysql, datetime\nimport os\nimport hashlib\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import g\napp = Flask(__name__)\n\n@app.route(\"/register\", methods=['POST'])\ndef register():\n # print(\"connecting\")\n # print(request.form.get('username'), request.form.get('pwd'), request.form.get('gender'), request.form.get('age'), request.form.get('phone_model'), request.form.get('cpu_model'),\n # int(request.form.get('memory_size')), float(request.form.get('battery_size')), int(request.form.get('storage_size')))\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"iotcollecter\")\n cursor = db.cursor()\n sql = \"select * from user where user_name=%s\"\n try:\n cursor.execute(sql, request.form.get('username'))\n results = cursor.fetchall()\n # print(results)\n db.commit()\n # print(len(results))\n # print('asd')\n if len(results) == 1:\n # print('555')\n return '2'\n else:\n # print('888')\n #sql0 = \"INSERT INTO user(user_name, password, sex, birthday) VALUES (%s, %s, %s, %s)\"\n sql1 = \"INSERT INTO user(user_name, password, sex, birthday, phone_model, cpu_model, memory_size, battery_size, storage_size) VALUES (%s, %s, %s, %s, %s, %s ,%s, %s, %s)\"\n sql2 = \"create table runstatusof\" + request.form.get('username') + \"(id int primary key not null auto_increment, update_time datetime, CPU_ratio float, memory_ratio float, battery_ratio float, storage_ratio float);\"\n sql3 = \"create table locationof\" + request.form.get('username') + \"(id int primary key not null auto_increment, update_time datetime, longitude double, latitude double);\"\n sql4 = \"create table stepof\" + request.form.get('username') + \"(id int primary key not null auto_increment, update_time datetime, step_sum int, step_today int);\"\n sql5 = \"create table webof\" + request.form.get('username') + \"(id int primary key not null auto_increment, url varchar(100), time datetime);\"\n sql6 = \"create table appof\" + request.form.get('username') + \"(id int primary key not null auto_increment, pkgname varchar(40), foregroundtime int, launcherCount int);\"\n try:\n # print('999')\n # cursor.execute(sql0, (request.form.get('username'), request.form.get('pwd'), request.form.get('gender'), dateToMysql(request.form.get('age'))))\n # print('333')\n cursor.execute(sql1, (request.form.get('username'), request.form.get('pwd'), request.form.get('gender'), dateToMysql(request.form.get('age')),request.form.get('phone_model'),\n request.form.get('cpu_model'), request.form.get('memory_size'), request.form.get('battery_size'), request.form.get('storage_size')))\n cursor.execute(sql2)\n cursor.execute(sql3)\n cursor.execute(sql4)\n cursor.execute(sql5)\n cursor.execute(sql6)\n db.commit()\n # print ('000')\n return '1'\n except:\n db.rollback()\n return '0'\n except:\n db.rollback()\n return '0'\n db.close()\n\n@app.route('/login', methods=['POST'])\ndef login():\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"iotcollecter\" )\n cursor = db.cursor()\n sql = \"select * from user where user_name=%s and password=%s\"\n # 
print(request.form.get('username'))\n # print(request.form.get('pwd'))\n try:\n cursor.execute(sql,(request.form.get('username'),request.form.get('pwd')))\n results = cursor.fetchall()\n # print(len(results))\n if len(results)==1:\n # print('return 1')\n return '1'\n else:\n # print('return 0')\n return '0'\n db.commit()\n except:\n db.rollback()\n db.close()\n\n@app.route('/uploadRunStatus',methods=['POST'])\ndef uploadRunStatus():\n # print(request.form.get('memory_ratio'))\n # print(request.form.get('user_name'))\n # print(request.form.get('storage_ratio'))\n # print(request.form.get('cpu_ratio'))\n # print(request.form.get('battery_ratio'))\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"iotcollecter\" )\n cursor = db.cursor()\n dateMysql = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S');\n # print(dateMysql)\n sql1 = \"select memory_size, storage_size from user where user_name = '\" + request.form.get('user_name') + \"'\"\n try:\n cursor.execute(sql1)\n temp = cursor.fetchall();\n # print(temp)\n except:\n db.rollback()\n return'0'\n # print(temp[0][0])\n # print(temp[0][1])\n memory_ratio = 100 - float(request.form.get('memory_ratio')) / temp[0][0] /10.24\n storage_ratio = 100 - float(request.form.get('storage_ratio')) / temp[0][1] * 100\n # print(memory_ratio)\n # print(storage_ratio)\n sql = \"insert into runstatusof\" + request.form.get('user_name') + \"(update_time, CPU_ratio, memory_ratio, battery_ratio, storage_ratio) values ('\" + dateMysql + \"', '\" + request.form.get('cpu_ratio') + \"', '\" + str(memory_ratio) + \"', '\" + request.form.get('battery_ratio') + \"', '\" + str(storage_ratio) + \"')\"\n #sql = \"insert into runstatusof%s values (dataMysql, 1, request.form.get('memory_ratio'), 1, 1\"\n try:\n # print(\"111\")\n # print(sql)\n cursor.execute(sql)\n db.commit()\n return '3'\n except:\n db.rollback()\n return '0'\n db.close()\n\n@app.route('/uploadLocation',methods=['POST'])\ndef uploadLocation():\n # print(\"uploaddddddd\")\n # print(request.form.get('lng'))\n # print(request.form.get('lat'))\n # print(request.form.get('user_name'))\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"iotcollecter\" )\n cursor = db.cursor()\n date1 = datetime.datetime.now()\n sql1 = \"select id,UNIX_TIMESTAMP(update_time) from locationof\" + request.form.get('user_name') + \" order by id desc limit 1\"\n cursor.execute(sql1)\n r1 = cursor.fetchall()\n dateMysql = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S');\n # print(dateMysql)\n sql = \"insert into locationof\" + request.form.get('user_name') + \"(update_time, longitude, latitude) values ('\" + dateMysql + \"', '\" + request.form.get('lng') + \"', '\" + request.form.get('lat') + \"')\"\n try:\n cursor.execute(sql)\n db.commit()\n if len(r1) == 1:\n sql2 = \"update locationof\" + request.form.get('user_name') + \" set stay_time = \" + str(int(time.mktime(date1.timetuple())) - r1[0][1]) + \" where id = \" + str(r1[0][0])\n print(sql2)\n cursor.execute(sql2)\n db.commit()\n return '3'\n except:\n db.rollback()\n return '0'\n db.close()\n\n@app.route('/uploadStep',methods=['POST'])\ndef uploadStep():\n # print(request.form.get('step_sum'))\n # print(request.form.get('step_today'))\n # print(request.form.get('user_name'))\n db = pymysql.connect(\"localhost\",\"root\",\"123456\",\"iotcollecter\" )\n cursor = db.cursor()\n dateMysql = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S');\n # print(dateMysql)\n today = date.today().strftime(\"%Y-%m-%d\")\n # print(today)\n sql2 = \"delete from stepof\" + 
request.form.get('user_name') + \" where update_time > '\" + today + \"'\"\n # print(sql2)\n sql = \"insert into stepof\" + request.form.get('user_name') + \"(update_time, step_sum, step_today) values ('\" + dateMysql + \"', '\" + request.form.get('step_sum') + \"', '\" + request.form.get('step_today') + \"')\"\n try:\n cursor.execute(sql2)\n db.commit()\n except:\n db.rollback()\n return '0'\n try:\n print(sql)\n cursor.execute(sql)\n db.commit()\n return '3'\n except:\n db.rollback()\n return '0'\n db.close()\n\n@app.route('/getStepOfYesterday', methods=['POST'])\ndef getStepOfYesterday():\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"iotcollecter\")\n cursor = db.cursor()\n yesterday = (date.today() + timedelta(days=-1)).strftime(\"%Y-%m-%d\")\n today = date.today().strftime(\"%Y-%m-%d\")\n # print(yesterday)\n # print(today)\n sql = \"select step_sum from stepof\" + request.form.get('username') + \" where update_time < '\" + today + \"'\" + \"order by id desc\"\n try:\n print(\"1114444444444\")\n # print(sql)\n cursor.execute(sql)\n i = cursor.fetchall();\n print(i)\n if len(i) != 0:\n print(str(i[0][0]))\n return str(i[0][0])\n else:\n return '0'\n except:\n db.rollback()\n return '0'\n db.close()\n\n@app.route('/getLastUploadtime', methods=['POST'])\ndef getLastUploadtime():\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"iotcollecter\")\n cursor = db.cursor()\n today = date.today().strftime(\"%Y-%m-%d\")\n sql = \"select UNIX_TIMESTAMP(update_time) from stepof\" + request.form.get('username') + \" where update_time < '\" + today + \"'\" + \"order by update_time desc limit 1\"\n try:\n cursor.execute(sql)\n i = cursor.fetchall();\n if len(i) == 1:\n print(str(i[0][0]))\n return str(i[0][0])\n else:\n return '0'\n except:\n db.rollback()\n return '0'\n db.close()\n\napp.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:123456@127.0.0.1:3306/iotcollecter'\n# 跟踪数据库的修改\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nuserdb = SQLAlchemy(app)\n\nclass webInfoT(userdb.Model):\n __tablename__ = 'webI'\n id = userdb.Column(userdb.Integer, primary_key=True) # primary_key会自动填充\n url = userdb.Column(userdb.String(100))\n time = userdb.Column(userdb.String(100))\n@app.route('/Record_stroy',methods=['POST'])\ndef Record_stroy():\n userdb.drop_all()\n userdb.create_all()\n print(request.form.get(\"url\"))\n webI= webInfoT(url=request.form.get(\"url\"), time=request.form.get(\"time\"))\n userdb.session.add(webI)\n userdb.session.commit()\n\n@app.route('/get_user',methods=['POST'])\ndef get_user():\n g.name=request.form.get(\"username\")\n print(request.form.get(\"username\"))\n return request.form.get(\"username\")\n\nclass appInfoT(userdb.Model):\n __tablename__ ='appI';\n id = userdb.Column(userdb.Integer, primary_key=True)\n username = userdb.Column(userdb.String(100)) # primary_key会自动填充\n pkgName = userdb.Column(userdb.String(100))\n foregroundtime = userdb.Column(userdb.String(100))\n launcherCount = userdb.Column(userdb.String(100))\n\n@app.route('/app_Record_stroy',methods=['POST'])\ndef app_Record_stroy():\n userdb.drop_all()\n userdb.create_all()\n print(request.form.get(\"pkgName\"))\n app=appInfoT( username=request.form.get(\"username\"),pkgName=request.form.get(\"pkgName\"),foregroundtime=request.form.get(\"foregroundtime\"),launcherCount=request.form.get(\"launcherCount\"))\n userdb.session.add(app)\n userdb.session.commit()\n return \"aaa\"\n\n\n\ndef dateToMysql(originstr):\n return 
originstr[0:4] + '-' + originstr[4:6] + '-' + originstr[6:8]\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\",debug=True)", "sub_path": "IotCollecter/IotCollecterFlask/manage2.py", "file_name": "manage2.py", "file_ext": "py", "file_size_in_byte": 11247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request.form.get", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 104, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request.form.get", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 129, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 129, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request.form.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 139, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 160, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 162, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 183, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 208, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 209, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 209, "usage_type": "name"}, 
{"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 238, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.g.name", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 246, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 261, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 261, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 262, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 262, "usage_type": "name"}]} +{"seq_id": "633359151", "text": "#Jeu = https://www.jeu.fr/jeu/piano-magique\n\nimport pyautogui\nimport keyboard\nimport win32api, win32con\nimport time\n\ntime.sleep(1)\n\ndef click(x,y):\n win32api.SetCursorPos((x,y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)\n\nwhile keyboard.is_pressed(\"s\") == False:\n if pyautogui.pixel(2200, 500)[0] == 0:\n click(2200, 500)\n if pyautogui.pixel(2300, 500)[0] == 0:\n click(2300, 500)\n if pyautogui.pixel(2400, 500)[0] == 0:\n click(2400, 500)\n if pyautogui.pixel(2500, 500)[0] == 0:\n click(2500, 500)", "sub_path": "piano_tiles.py", "file_name": "piano_tiles.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "time.sleep", "line_number": 8, "usage_type": "call"}, {"api_name": "win32api.SetCursorPos", "line_number": 11, "usage_type": "call"}, {"api_name": "win32api.mouse_event", "line_number": 12, "usage_type": "call"}, {"api_name": "win32con.MOUSEEVENTF_LEFTDOWN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "win32api.mouse_event", "line_number": 14, "usage_type": "call"}, {"api_name": "win32con.MOUSEEVENTF_LEFTUP", "line_number": 14, "usage_type": "attribute"}, {"api_name": "keyboard.is_pressed", "line_number": 16, "usage_type": "call"}, {"api_name": "pyautogui.pixel", "line_number": 17, "usage_type": "call"}, {"api_name": "pyautogui.pixel", "line_number": 19, "usage_type": "call"}, {"api_name": 
"pyautogui.pixel", "line_number": 21, "usage_type": "call"}, {"api_name": "pyautogui.pixel", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "299491564", "text": "\"\"\"\nCredits: \n\nBhushan Sonawane https://github.com/bhushan23 Apple, Inc.\n\nhttps://github.com/onnx/onnx-coreml/issues/478\n\n\"\"\"\nfrom pytorch_transformers.modeling_distilbert import DistilBertForQuestionAnswering\nfrom onnx_coreml import convert\nimport torch\nimport numpy as np\n\nmodel = DistilBertForQuestionAnswering.from_pretrained(\n \"distilbert-base-uncased-distilled-squad\", torchscript=True\n)\ntorch.save(model, './distilbert.pt')\nmodel.eval()\n\ntorch.onnx.export(\n model,\n torch.ones(1, 128, dtype=torch.long),\n \"distilbert-squad-128.onnx\",\n verbose=True,\n input_names=[\"input_ids\"],\n output_names=[\"start_scores\", \"end_scores\"],\n)\n\n\ndef _convert_softmax(builder, node, graph, err):\n \"\"\"\n convert to CoreML SoftMax ND Layer:\n https://github.com/apple/coremltools/blob/655b3be5cc0d42c3c4fa49f0f0e4a93a26b3e492/mlmodel/format/NeuralNetwork.proto#3547\n \"\"\"\n axis = node.attrs.get(\"axis\", 1)\n builder.add_softmax_nd(\n name=node.name,\n input_name=node.inputs[0],\n output_name=node.outputs[0]\n + (\"_softmax\" if node.op_type == \"LogSoftmax\" else \"\"),\n axis=axis,\n )\n if node.op_type == \"LogSoftmax\":\n builder.add_unary(\n name=node.name + \"_log\",\n input_name=node.outputs[0] + \"_softmax\",\n output_name=node.outputs[0],\n mode=\"log\",\n )\n\n\nmlmodel = convert(\n model=\"./distilbert-squad-128.onnx\",\n target_ios=\"13\",\n custom_conversion_functions={\"Softmax\": _convert_softmax},\n)\nmlmodel.save(\"./distilbert-squad-128.mlmodel\")\n", "sub_path": "model_generation/distilbert-onnx-coreml-bhushan.py", "file_name": "distilbert-onnx-coreml-bhushan.py", "file_ext": "py", "file_size_in_byte": 1560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pytorch_transformers.modeling_distilbert.DistilBertForQuestionAnswering.from_pretrained", "line_number": 14, "usage_type": "call"}, {"api_name": "pytorch_transformers.modeling_distilbert.DistilBertForQuestionAnswering", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.onnx.export", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.onnx", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 22, "usage_type": "attribute"}, {"api_name": "onnx_coreml.convert", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "163567270", "text": "n=int(input())\nlist=[]\nfrom itertools import combinations\nfor i in range(n+1):\n list.append(i)\n list.append(i)\nres=[]\nfor i in combinations(list,2):\n temp=[]\n for j in i:\n temp.append(j)\n if temp[0]*temp[0]+temp[1]*temp[1]==n:\n res.append(temp)\nif len(res)!=0:\n print('True')\nelse:\n print('Fasle')", "sub_path": "Code/CodeRecords/2219/60647/289607.py", "file_name": "289607.py", "file_ext": "py", "file_size_in_byte": 332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "itertools.combinations", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "571946758", "text": "\"\"\"\nCopyright (c) 2019, National Institute of Informatics\nAll rights reserved.\nAuthor: Huy H. 
Nguyen\n-----------------------------------------------------\nScript for testing classification of ClassNSeg (the proposed method)\n\"\"\"\n\nimport os\nimport torch\nimport numpy as np\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\nfrom sklearn import metrics\nfrom scipy.optimize import brentq\nfrom scipy.interpolate import interp1d\nfrom sklearn.metrics import roc_curve\nimport argparse\nfrom model.ae import Encoder\nfrom model.ae import ActivationLoss\nfrom PIL import Image\nimport torch.utils.data as data\n\n\ndef default_loader(path):\n return Image.open(path).convert('RGB')\n\nclass FileListDataset(data.Dataset):\n def __init__(self, list_file, transform=None):\n with open(list_file, 'rt') as f:\n all_paths=[x.strip() for x in f.readlines()];\n print('num of images=', len(all_paths)) \n self.dataset = all_paths\n self.transform=transform\n\n def __getitem__(self, idx):\n item = self.dataset[idx]\n img = default_loader(item)\n # add transforms\n if self.transform is not None:\n img=self.transform(img)\n return img, 0\n\n def __len__(self):\n return len(self.dataset)\n \nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', dest='input', help='input_file_list')\nparser.add_argument('-o', '--output', dest='output', help='Path to save outputs.', default='./output')\n\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')\nparser.add_argument('--gpu_id', type=int, default=-1, help='GPU ID')\nparser.add_argument('--id', type=int, default=46, help=\"checkpoint ID\")\nparser.add_argument('--outf', default='checkpoints/full', help='folder to output images and model checkpoints')\n\nopt = parser.parse_args()\nprint(opt)\n\nif __name__ == \"__main__\":\n\n \n\n encoder = Encoder(3)\n act_loss_fn = ActivationLoss()\n \n encoder.load_state_dict(torch.load(os.path.join(opt.outf,'encoder_' + str(opt.id) + '.pt'), map_location=torch.device('cpu')))\n encoder.eval()\n\n if opt.gpu_id >= 0:\n encoder.cuda(opt.gpu_id)\n act_loss_fn.cuda(opt.gpu_id)\n\n class Normalize_3D(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Tensor: Normalized image.\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns: \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.sub_(m).div_(s)\n return tensor\n\n transform_tns = transforms.Compose([\n transforms.Resize((256,256)),\n transforms.ToTensor(),\n ])\n \n transform_norm = Normalize_3D((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n dataset_test = FileListDataset(opt.input, transform=transform_tns)\n assert dataset_test\n dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))\n\n loss_act_test = 0.0\n\n tol_label = np.array([], dtype=np.float32)\n tol_pred = np.array([], dtype=np.float32)\n tol_pred_prob = np.array([], dtype=np.float32)\n\n count = 0\n\n for fft_data, labels_data in tqdm(dataloader_test):\n\n fft_label = labels_data.numpy().astype(np.float32)\n labels_data = labels_data.float()\n\n rgb = transform_norm(fft_data[:,:,:,0:256])\n\n if opt.gpu_id >= 0:\n rgb = rgb.cuda(opt.gpu_id)\n labels_data = labels_data.cuda(opt.gpu_id)\n\n latent 
= encoder(rgb).reshape(-1, 2, 64, 16, 16)\n\n zero_abs = torch.abs(latent[:,0]).view(latent.shape[0], -1)\n zero = zero_abs.mean(dim=1)\n\n one_abs = torch.abs(latent[:,1]).view(latent.shape[0], -1)\n one = one_abs.mean(dim=1)\n\n loss_act = act_loss_fn(zero, one, labels_data)\n loss_act_data = loss_act.item()\n\n output_pred = np.zeros((fft_data.shape[0]), dtype=np.float32)\n\n for i in range(fft_data.shape[0]):\n if one[i] >= zero[i]:\n output_pred[i] = 1.0\n else:\n output_pred[i] = 0.0\n\n tol_label = np.concatenate((tol_label, fft_label))\n tol_pred = np.concatenate((tol_pred, output_pred))\n \n pred_prob = torch.softmax(torch.cat((zero.reshape(zero.shape[0],1), one.reshape(one.shape[0],1)), dim=1), dim=1)\n tol_pred_prob = np.concatenate((tol_pred_prob, pred_prob[:,1].data.cpu().numpy()))\n\n loss_act_test += loss_act_data\n count += 1\n #break\n acc_test = metrics.accuracy_score(tol_label, tol_pred)\n loss_act_test /= count\n\n fpr, tpr, thresholds = roc_curve(tol_label, tol_pred_prob, pos_label=1)\n eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n\n text_writer = open(opt.output, 'w')\n for i in range(len(tol_pred)):\n #print(dataset_test.dataset[i], tol_pred[i], tol_pred_prob[i])\n text_writer.write('%s,%.6f,%.6f\\n'% (dataset_test.dataset[i], tol_pred[i], tol_pred_prob[i]))\n\n text_writer.flush()\n text_writer.close()\n\n print('[Epoch %d] act_loss: %.4f acc: %.2f eer: %.2f' % (opt.id, loss_act_test, acc_test*100, eer*100))\n", "sub_path": "ClassNSeg_test_cls.py", "file_name": "ClassNSeg_test_cls.py", "file_ext": "py", "file_size_in_byte": 5611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 30, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 49, "usage_type": "call"}, {"api_name": "model.ae.Encoder", "line_number": 67, "usage_type": "call"}, {"api_name": "model.ae.ActivationLoss", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 70, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 92, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 92, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 93, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 93, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 107, 
"usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.abs", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 145, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 150, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 153, "usage_type": "call"}, {"api_name": "scipy.optimize.brentq", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "141606927", "text": "import hashlib\nimport functools\n\n@functools.singledispatch\ndef hash(arg):\n type_name = type(arg).__name__\n assert False, \"Unsupported object type : \" + type_name\n\n@hash.register(str)\ndef _(arg):\n sha = hashlib.sha1(bytes(arg,'utf-8')).hexdigest()\n return sha\n\n\n@hash.register(list)\ndef _(arg):\n result = type(arg)()\n for i in arg:\n result.append(hashlib.sha1(bytes(i,'utf-8')).hexdigest())\n return result\n\n@hash.register(tuple)\ndef _(arg):\n result = []\n for i in arg:\n result.append(hashlib.sha1(bytes(i,'utf-8')).hexdigest())\n return tuple(result)\n\n@hash.register(set)\ndef _(arg):\n result = type(arg)()\n for i in arg:\n result.add(hashlib.sha1(bytes(i,'utf-8')).hexdigest())\n return result\n\n\n@hash.register(dict)\ndef _(arg):\n keys = arg.keys()\n values = []\n for i in arg.values():\n values.append(hashlib.sha1(bytes(i,'utf-8')).hexdigest())\n result = dict.fromkeys(keys,None)\n result.update(zip(keys,values))\n return result\n\n\nclass TestInit:\n\n def test_hash(self):\n assert(hash({'Hello','world'})=={'f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0', '7c211433f02071597741e6ff5a8ea34789abbf43'})\n\n def test_hash_str(self):\n assert(hash('Hello')=='f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0')\n\n", "sub_path": "Decorators/task_1.py", "file_name": "task_1.py", "file_ext": "py", "file_size_in_byte": 1287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "functools.singledispatch", "line_number": 4, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 11, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 19, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 26, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 33, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "393621648", "text": "import os\n\nimport cv2\nimport dogs_cnn_models\nimport numpy as np\nimport pandas as pd\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keraspipelines import KerasPipeline\nfrom 
scipy import misc\nfrom tqdm import tqdm\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\ndef load_image(path, img_size):\n img = misc.imread(path, mode='RGB')\n img = misc.imresize(img, img_size)\n return img\n\n\ndef load_data(src, df, img_size, labels=None):\n\n X = np.zeros((df.shape[0], img_size[0], img_size[1], 3), dtype='float32')\n if labels is not None:\n y_train = []\n\n for i in tqdm(range(df.shape[0])):\n X[i] = load_image('{}{}.jpg'.format(\n src, df.iloc[i, :]['id']), img_size)\n if labels is not None:\n y_train.append(labels[i])\n\n if labels is not None:\n return X, np.array(y_train, np.uint8)\n else:\n return X\n\n\n# Set directory structure:\nsrc_dir = '/home/w/Projects/Dog_Breeds/input/'\nsrc_train = src_dir + 'train/'\nsrc_test = src_dir + 'test/'\nimage_size = (224, 224)\n\n\ndf_train = pd.read_csv(src_dir + 'labels.csv')\ndf_test = pd.read_csv(src_dir + 'sample_submission.csv')\n\n\ntargets_series = pd.Series(df_train['breed'])\none_hot_df = pd.get_dummies(targets_series, sparse=True)\none_hot = one_hot_df.values\n\n\nX_train, y_train = load_data(src_train, df_train, image_size, one_hot)\nX_test = load_data(src_test, df_test, image_size)\n\nprint('Training data shape:', X_train.shape)\nprint('Test data shape:', X_test.shape)\n\n\nnumber_classes = y_train.shape[1]\n\nmodel_callbacks = [EarlyStopping(monitor='val_loss', patience=5, verbose=1),\n ReduceLROnPlateau(monitor='val_loss', factor=0.5, verbose=1,\n patience=3, min_lr=1e-5)]\n\nmodel_parameters = {\n 'img_size': (image_size[0], image_size[1], 3),\n 'num_classes': number_classes,\n}\n\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.1,\n zoom_range=0.25,\n rotation_range=45,\n width_shift_range=0.25,\n height_shift_range=0.25,\n horizontal_flip=True,\n channel_shift_range=0.07)\n\nvalid_datagen = ImageDataGenerator(rescale=1. 
/ 255,)\n\n\npipeline_parameters = {\n 'model_name': getattr(dogs_cnn_models, 'resnet_dense'),\n 'predict_test': True,\n 'model_callbacks': model_callbacks,\n 'number_epochs': 10,\n 'batch_size': 16,\n 'seed': 1337,\n 'shuffle': True,\n 'verbose': True,\n\n 'run_save_name': 'resnet_dense_5fold_SKF_run1',\n 'load_keras_model': False,\n 'save_model': True,\n 'save_history': True,\n 'save_statistics': True,\n 'output_statistics': True,\n\n 'src_dir': os.getcwd(),\n\n 'train_datagen': train_datagen,\n 'valid_datagen': valid_datagen,\n 'test_datagen': train_datagen,\n 'number_test_augmentations': 5,\n}\n\n\npipeline = KerasPipeline(model_name=pipeline_parameters['model_name'],\n predict_test=pipeline_parameters['predict_test'],\n model_callbacks=pipeline_parameters['model_callbacks'],\n number_epochs=pipeline_parameters['number_epochs'],\n batch_size=pipeline_parameters['batch_size'],\n seed=pipeline_parameters['seed'],\n shuffle=pipeline_parameters['shuffle'],\n verbose=pipeline_parameters['verbose'],\n\n run_save_name=pipeline_parameters['run_save_name'],\n load_keras_model=pipeline_parameters['load_keras_model'],\n save_model=pipeline_parameters['save_model'],\n save_history=pipeline_parameters['save_history'],\n save_statistics=pipeline_parameters['save_statistics'],\n output_statistics=pipeline_parameters['output_statistics'],\n\n src_dir=pipeline_parameters['src_dir'],\n\n train_datagen=pipeline_parameters['train_datagen'],\n valid_datagen=pipeline_parameters['valid_datagen'],\n test_datagen=pipeline_parameters['test_datagen'],\n number_test_augmentations=pipeline_parameters['number_test_augmentations'],\n )\n\n\nkf_model, oof_train, oof_test = pipeline.kfold_run(\n X_train=X_train,\n y_train=y_train,\n X_test=X_test,\n model_params=model_parameters,\n n_folds=5,\n stratify=True,\n index_number=1,\n flow_augment=True\n)\n\n\npd.to_pickle(oof_train, 'OOF_train_resnet_dense_5fold_SKF_run1.pkl')\npd.to_pickle(oof_test, 'OOF_test_resnet_dense_5fold_SKF_run1.pkl')\n\n\nsubmission = pd.DataFrame(oof_test.mean(axis=-1))\nsubmission.columns = one_hot_df.columns.values\nsubmission.insert(0, 'id', df_test['id'])\nsubmission.to_csv(\n 'SUB_resnet_dense_5fold_SKF_run1.csv', index=False)\n", "sub_path": "examples/dog_breed/dogs_training_script.py", "file_name": "dogs_training_script.py", "file_ext": "py", "file_size_in_byte": 4926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "scipy.misc.imread", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 18, "usage_type": "name"}, {"api_name": "scipy.misc.imresize", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 
66, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 86, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 106, "usage_type": "call"}, {"api_name": "keraspipelines.KerasPipeline", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "413124200", "text": "def warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\nimport os\nimport tarfile\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pandas import DataFrame\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import StratifiedShuffleSplit, cross_val_predict, cross_val_score\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\nPATH = os.getcwd() + \"/data/\" # \"/home/jb7656/MachineLearning/Group Project/Datasets/historical-hourly-weather-data/\"\nFILE_NAMES = [\"humidity.csv\", \"pressure.csv\", \"temperature.csv\", \"wind_direction.csv\", \"wind_speed.csv\", \"weather_description.csv\"]\n\n# reads in all csv's of data\ndef load_weather_data():\n result_df = pd.DataFrame()\n\n for i in range(0, len(FILE_NAMES)):\n\n f = FILE_NAMES[i]\n df = pd.read_csv(PATH + f)\n df = df[[\"Portland\"]]\n\n if(i == 0):\n result_df = pd.concat([result_df, df])\n result_df.columns = [f[:len(f) - 4]]\n else:\n result_df.insert(i, f[:len(f) - 4], df, allow_duplicates = False)\n\n return result_df\n\ndef clean(data):\n\n data = data.copy()\n data.dropna(inplace=True) # drop any rows with a missing value\n\n # weather description modifications\n # remove some descriptions\n data.where(data[\"weather_description\"] != \"mist\", inplace=True)\n data.where(data[\"weather_description\"] != \"fog\", inplace=True)\n data.where(data[\"weather_description\"] != \"haze\", inplace=True)\n data.where(data[\"weather_description\"] != \"smoke\", inplace=True)\n data.where(data[\"weather_description\"] != \"light snow\", inplace=True)\n data.where(data[\"weather_description\"] != \"snow\", inplace=True)\n data.where(data[\"weather_description\"] != \"proximity thunderstorm\", inplace=True)\n data.where(data[\"weather_description\"] != \"dust\", inplace=True)\n data.where(data[\"weather_description\"] != \"freezing rain\", inplace=True)\n data.where(data[\"weather_description\"] != \"heavy snow\", inplace=True)\n data.where(data[\"weather_description\"] != \"sleet\", inplace=True)\n data.dropna(inplace=True)\n # combine some descriptions\n y = data[\"weather_description\"]\n y.replace(\"sky is clear\", \"clear\", inplace=True)\n y.replace(\"light rain\", \"rain\", inplace=True)\n y.replace(\"moderate rain\", \"rain\", inplace=True)\n y.replace(\"heavy intensity rain\", \"rain\", inplace=True)\n y.replace(\"light intensity drizzle\", \"rain\", inplace=True)\n y.replace(\"very heavy rain\", \"rain\", inplace=True)\n y.replace(\"thunderstorm\", \"rain\", inplace=True)\n y.replace(\"thunderstorm with light rain\", \"rain\", inplace=True)\n y.replace(\"drizzle\", \"rain\", inplace=True)\n y.replace(\"overcast clouds\", \"clouds\", 
inplace=True)\n y.replace(\"broken clouds\", \"clouds\", inplace=True)\n y.replace(\"few clouds\", \"clouds\", inplace=True)\n y.replace(\"scattered clouds\", \"clouds\", inplace=True)\n # encode\n encoder = LabelEncoder()\n # put back\n data.drop(\"weather_description\", axis=1, inplace=True)\n data[\"weather_description\"] = encoder.fit_transform(y)\n data.reset_index(inplace=True)\n\n # standardize all the numerical values\n X = data.drop(\"weather_description\", axis=1)\n std_scaler = StandardScaler()\n X_scaled = std_scaler.fit_transform(X)\n # put back\n result_data = DataFrame(X_scaled, columns=X.columns)\n result_data[\"weather_description\"] = data[\"weather_description\"].copy()\n\n return result_data, encoder\n\ndef split(data):\n\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n for train_index, test_index in split.split(data, data[\"weather_description\"]):\n train_set = data.loc[train_index]\n test_set = data.loc[test_index]\n\n X_train = train_set.drop(\"weather_description\", axis=1).values\n y_train = train_set[\"weather_description\"].copy().values\n X_test = test_set.drop(\"weather_description\", axis=1).values\n y_test = test_set[\"weather_description\"].copy().values\n\n return X_train, y_train, X_test, y_test\n\ndef train(X_train, y_train):\n sgd_clf = SGDClassifier(random_state=42)\n sgd_clf.fit(X_train, y_train)\n return sgd_clf\n\ndef print_scores(sgd_clf, X, y):\n y_pred = cross_val_predict(sgd_clf, X, y, cv=3)\n print(\"accuracy\")\n print(cross_val_score(sgd_clf, X, y, cv=3, scoring=\"accuracy\"))\n print(\"precision\")\n print(precision_score(y, y_pred, average=None))\n print(\"recall\")\n print(recall_score(y, y_pred, average=None))\n print(\"f1\")\n print(f1_score(y, y_pred, average=None))\n\ndef main(data=None):\n\n if data is None:\n data = load_weather_data()\n\n clean_data, encoder = clean(data)\n X_train, y_train, X_test, y_test = split(clean_data)\n sgd_clf = train(X_train, y_train)\n\n print(\"---Training set---\")\n print_scores(sgd_clf, X_train, y_train)\n print(\"---Test set---\")\n print_scores(sgd_clf, X_test, y_test)\n\n return data, sgd_clf, encoder", "sub_path": "sgd_classify-40s_backup.py", "file_name": "sgd_classify-40s_backup.py", "file_ext": "py", "file_size_in_byte": 4990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.warn", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedShuffleSplit", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_predict", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 113, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.recall_score", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "628721792", "text": "import math\nimport json\n\nfrom FixedWidthTextParser.Seismic.SpsParser import Point\nfrom SeismicFold.Bin import Bin\nfrom SeismicFold.helpers import create_point_by_line_point_idx\n\n\nclass Grid:\n def __init__(self, x0: float = 0.0, y0: float = 0.0, rot: float = 0.0, dxb: float = 0.0, dyb: float = 0.0,\n nxb: int = 0, nyb: int = 0):\n self.__x0 = x0\n self.__y0 = y0\n self.__rot = rot\n self.__dxb = dxb\n self.__dyb = dyb\n self.__nxb = nxb\n self.__nyb = nyb\n\n def get_x0(self):\n return self.__x0\n\n def get_y0(self):\n return self.__y0\n\n def get_rot(self):\n return self.__rot\n\n def get_dxb(self):\n return self.__dxb\n\n def get_dyb(self):\n return self.__dyb\n\n def get_nxb(self):\n return self.__nxb\n\n def get_nyb(self):\n return self.__nyb\n\n def rotate_point_x_y(self, point: Point, counterclockwise=False):\n rotated_point = create_point_by_line_point_idx(point.line, point.point, point.point_idx)\n x = point.easting\n y = point.northing\n x0 = float(self.__x0)\n y0 = float(self.__y0)\n rot = float(self.__rot)\n if counterclockwise is True:\n rot = rot * -1\n\n radians = math.radians(rot)\n\n adjusted_x = (x - x0)\n adjusted_y = (y - y0)\n cos_rad = math.cos(radians)\n sin_rad = math.sin(radians)\n rotx = x0 + cos_rad * adjusted_x + sin_rad * adjusted_y\n roty = y0 + -sin_rad * adjusted_x + cos_rad * adjusted_y\n\n rotated_point.easting = rotx\n rotated_point.northing = roty\n\n return rotated_point\n\n def bin_xy2cr(self, rotated: Point):\n b = Bin()\n\n c1 = (rotated.easting - self.__x0) / self.__dxb\n c2 = math.floor(c1)\n\n if c1 > c2:\n c = int(c2 + 1)\n else:\n c = int(c2)\n\n r1 = (rotated.northing - self.__y0) / self.__dyb\n r2 = math.floor(r1)\n\n if r1 > r2:\n r = int(r2 + 1)\n else:\n r = int(r2)\n\n b.column = c\n b.row = r\n\n return b\n\n def bin_number(self, b: Bin):\n number = -1\n if b.column > 0 and b.row > 0:\n number = (b.row - 1) * self.__nxb + b.column\n\n return number\n\n def bin_cr(self, bin_number: int):\n b = Bin()\n r = int(math.floor((bin_number / self.__nxb)) + 1)\n c = bin_number - (r - 1) * self.__nxb\n b.column = c\n b.row = r\n\n return b\n\n def write(self, filename):\n grid_dict = {'x0': self.__x0, 'y0': self.__y0, 'rot': self.__rot, 'dxb': self.__dxb, 'dyb': self.__dyb,\n 'nxb': self.__nxb, 'nyb': self.__nyb}\n file = open(filename, \"w\")\n json.dump(grid_dict, file)\n file.close()\n\n def read(self, filename):\n file = open(filename, \"r\")\n grid_dict = json.load(file)\n self.__x0 = grid_dict['x0']\n self.__y0 = grid_dict['y0']\n self.__rot = grid_dict['rot']\n self.__dxb = grid_dict['dxb']\n self.__dyb = grid_dict['dyb']\n self.__nxb = grid_dict['nxb']\n self.__nyb = grid_dict['nyb']\n file.close()\n", "sub_path": "SeismicFold/Grid.py", "file_name": "Grid.py", "file_ext": "py", "file_size_in_byte": 3183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "FixedWidthTextParser.Seismic.SpsParser.Point", "line_number": 41, "usage_type": "name"}, {"api_name": "SeismicFold.helpers.create_point_by_line_point_idx", "line_number": 42, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 51, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 55, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 56, "usage_type": "call"}, {"api_name": 
"FixedWidthTextParser.Seismic.SpsParser.Point", "line_number": 65, "usage_type": "name"}, {"api_name": "SeismicFold.Bin.Bin", "line_number": 66, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 69, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 77, "usage_type": "call"}, {"api_name": "SeismicFold.Bin.Bin", "line_number": 89, "usage_type": "name"}, {"api_name": "SeismicFold.Bin.Bin", "line_number": 97, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 109, "usage_type": "call"}, {"api_name": "json.load", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "2724520", "text": "import re\nimport vim\nimport vimside.env\nimport vimside.rpc as rpc\nimport logging\n\ndef _is_operator_part(c):\n return re.match(r\"[~!@%^*+-<>?:=&|/\\\\]\", c)\n\ndef _find_word_at(line, col):\n logging.info(\"Searching for scala word at %s\", col)\n def find(p):\n if col >= len(line):\n return (col, col)\n start = col\n while start >= 0 and p(line[start]):\n start -= 1\n start += 1\n\n end = col\n while end < len(line) and p(line[end]):\n end += 1\n\n end -= 1\n\n return (start, end)\n\n (start, end) = find(lambda c: re.match(r\"[\\w_]\", c))\n\n logging.info(\"Found word in range (%s, %s) => '%s'\", start, end, line[start: end + 1])\n\n if start > col or end < col:\n return find(lambda c: _is_operator_part(c))\n else:\n return (start, end)\n\ndef _showStatus(status):\n print(status)\n\ndef ShowTypeAtPoint(env, line, col):\n if not env.is_ready():\n return 0\n\n def handleSymbolInfo(resp):\n tpe = env.typeinfo.format_symbol(resp.result()[\"ok\"])\n if tpe is not None:\n _showStatus(tpe)\n\n def handleTypeInfo(resp):\n tpe = env.typeinfo.format_type_info(resp.result()[\"ok\"])\n if tpe is None:\n env.typeinfo.askSymbolInfo(filename, current).add_done_callback(handleSymbolInfo)\n else:\n _showStatus(tpe)\n\n filename = vim.eval('expand(\"%:p\")')\n\n word = _find_word_at(vim.current.buffer[line], col)\n\n line_offset = int(vim.eval(\"line2byte(%s)\" % line))\n start = word[0] + line_offset\n end = word[1] + line_offset\n current = line_offset + col\n\n env.typeinfo.askTypeInfo(filename, start, end).add_done_callback(handleTypeInfo)\n\n return 0\n", "sub_path": "python/vimside/vim/commands/typeinfo.py", "file_name": "typeinfo.py", "file_ext": "py", "file_size_in_byte": 1724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "re.match", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 11, "usage_type": "call"}, {"api_name": "re.match", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 30, "usage_type": "call"}, {"api_name": "vim.eval", "line_number": 56, "usage_type": "call"}, {"api_name": "vim.current", "line_number": 58, "usage_type": "attribute"}, {"api_name": "vim.eval", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "567447970", "text": "#! 
/usr/bin/env python\r\n\r\nimport argparse\r\nimport re\r\nimport os\r\n\r\nparser = argparse.ArgumentParser(description=\"Pre-processing Data Name\")\r\n\r\n# I&O file\r\nparser.add_argument('--input_file', dest=\"input_file\", type=str, default=\"./data/data.txt\", help=\"Data File\")\r\nparser.add_argument('--class_file', dest=\"class_file\", type=str, default=\"./data/class.txt\", help=\"Class File\")\r\nparser.add_argument('--code_file', dest='code_file', type=str, default='./data/code_list.txt', help=\"Code List File\")\r\n\r\n# Pre-process Mode\r\nparser.add_argument('--DN', dest=\"DN\", type=bool, default=False,\r\n help=\"Replace Dash to None (With Default Function Phase 2.)\")\r\nparser.add_argument('--DS', dest=\"DS\", type=bool, default=False,\r\n help=\"Replace Dash to Space (With Default Function Phase 3.)\")\r\nparser.add_argument('--SC', dest=\"SC\", type=bool, default=False,\r\n help=\"Delete Single Character\\nBe Processed After Default Function Phase 4.\")\r\nparser.add_argument('--DC', dest=\"DC\", type=bool, default=False,\r\n help=\"Delete Word with Duplicated Character (cf. XX, MMM)\\n\\\r\n Be Processed After Default Function Phase 4.\\n\\\r\n With SC Option, Processing with SC\")\r\nparser.add_argument('--NA', dest=\"NA\", type=str, default=None,\r\n help=\"Delete NA Word in NA File Path\\nBe Processed at the Last\")\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\ndef pre_process_data(input_file, class_file, code_file, dn, ds, sc, dc, na_file):\r\n dir_path = os.path.dirname(os.path.realpath(input_file))\r\n folder_name = \"Pre_Processed\" + \"_DN\"*dn + \"_DS\"*ds + \"_SC\"*sc + \"_DC\"*dc + \"_NA\"*bool(na_file)\r\n output_path = os.path.join(dir_path, folder_name)\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n\r\n data_list = list(open(input_file, 'r', encoding=\"utf-8\").readlines())\r\n class_list = list(open(class_file, 'r', encoding=\"utf-8\").readlines())\r\n code_list = list(open(code_file, 'r', encoding=\"utf-8\").readlines())\r\n\r\n g = open(os.path.join(output_path, \"data.txt\"), 'w', encoding=\"utf-8\")\r\n h = open(os.path.join(output_path, \"class.txt\"), 'w', encoding=\"utf-8\")\r\n\r\n if na_file:\r\n with open(na_file, 'r', encoding=\"utf-8\") as e:\r\n na_list = [line.rstrip() for line in e]\r\n\r\n for i, line in enumerate(data_list):\r\n try:\r\n class_data = int(class_list[i].strip())\r\n if class_data < 10000 or class_data > 999999:\r\n del class_list[i]\r\n del data_list[i]\r\n continue\r\n except ValueError:\r\n del class_list[i]\r\n del data_list[i]\r\n continue\r\n\r\n if class_list[i] not in code_list:\r\n del class_list[i]\r\n del data_list[i]\r\n continue\r\n\r\n # Phase 1&2.\r\n if dn and not ds:\r\n data = re.sub(r'[\\.\\-]', '', line.strip().upper())\r\n else:\r\n data = re.sub(r'[\\.]', '', line.strip().upper())\r\n\r\n # Phase 3.\r\n if ds and not dn:\r\n data = re.sub(r'[^A-Z]', ' ', data)\r\n else:\r\n data = re.sub(r'[^A-Z\\-]', ' ', data)\r\n\r\n if not ds and not dn:\r\n data = ' '.join(x.strip('-') for x in data.split())\r\n\r\n if sc and dc and na_file:\r\n data = ' '.join(x for x in data.split() if len(x) > 1 and len(set(x)) > 1 and x not in na_list)\r\n elif sc and dc:\r\n data = ' '.join(x for x in data.split() if len(x) > 1 and len(set(x)) > 1)\r\n elif sc and na_file:\r\n data = ' '.join(x for x in data.split() if len(x) > 1 and x not in na_list)\r\n elif dc and na_file:\r\n data = ' '.join(x for x in data.split() if len(set(x)) > 1 and x not in na_list)\r\n elif sc:\r\n data = ' '.join(x for x 
in data.split() if len(x) > 1)\r\n elif dc:\r\n data = ' '.join(x for x in data.split() if len(set(x)) > 1)\r\n elif na_file:\r\n data = ' '.join(x for x in data.split() if x not in na_list)\r\n else:\r\n data = ' '.join(x for x in data.split())\r\n\r\n if data == '':\r\n del class_list[i]\r\n del data_list[i]\r\n continue\r\n\r\n g.write(data + '\\n')\r\n h.write(class_list[i].strip() + '\\n')\r\n\r\n\r\ndef main():\r\n if args.DN and args.DS:\r\n print(\"Select Either DN or DS\")\r\n exit()\r\n pre_process_data(args.input_file, args.class_file, args.code_file, args.DN, args.DS, args.SC, args.DC, args.NA)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "data_preprocess.py", "file_name": "data_preprocess.py", "file_ext": "py", "file_size_in_byte": 4602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 68, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 70, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 74, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "524218677", "text": "# 14 July 2018 Miroslav Gasparek\n# Python bootcamp, lesson 40: Image processing practice with Python\n\n# Import modules\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport scipy.ndimage\nimport skimage.io\nimport skimage.segmentation\nimport skimage.morphology\n\n# Import some pretty Seaborn settings\nimport seaborn as sns\nrc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}\nsns.set(rc=rc)\n\ndef cell_segmenter(im, thresh='otsu', radius=20.0, image_mode='phase',\n area_bounds=(0,1e7), ecc_bounds=(0, 1)):\n \"\"\"\n This function segments a given image via thresholding and returns\n a labeled segmentation mask.\n\n Parameters\n ----------\n im : 2d-array\n Image to be segmented. This may be of either float or integer\n data type.\n thresh : int, float, or 'otsu'\n Value used during thresholding operation. This can either be a value\n (`int` or `float`) or 'otsu'. If 'otsu', the threshold value will be\n determined automatically using Otsu's thresholding method.\n radius : float\n Radius for gaussian blur for background subtraction. Default value\n is 20.\n image_mode : 'phase' or 'fluorescence'\n Mode of microscopy used to capture the image. If 'phase', objects with\n intensity values *lower* than the provided threshold will be selected.\n If `fluorescence`, values *greater* than the provided threshold will be\n selected. 
Default value is 'phase'.\n    area_bounds : tuple of ints.\n        Range of areas of acceptable objects. This should be provided in units\n        of square pixels.\n    ecc_bounds : tuple of floats\n        Range of eccentricity values of acceptable objects. These values should\n        range between 0.0 and 1.0.\n\n    Returns\n    -------\n    im_labeled : 2d-array, int\n        Labeled segmentation mask.\n    \"\"\"\n\n    # Apply a median filter to remove hot pixels.\n    med_selem = skimage.morphology.square(3)\n    im_filt = skimage.filters.median(im, selem=med_selem)\n\n    # Perform gaussian subtraction\n    im_sub = bg_subtract(im_filt, radius)\n\n    # Determine the thresholding method. Use equality, not identity ('is'),\n    # when comparing strings.\n    if thresh == 'otsu':\n        thresh = skimage.filters.threshold_otsu(im_sub)\n\n    # Determine the image mode and apply threshold.\n    if image_mode == 'phase':\n        im_thresh = im_sub < thresh\n    elif image_mode == 'fluorescence':\n        im_thresh = im_sub > thresh\n    else:\n        raise ValueError(\"image mode not recognized. Must be 'phase'\"\n                         + \" or 'fluorescence'\")\n\n    # Label the objects.\n    im_label = skimage.measure.label(im_thresh)\n\n    # Apply the area and eccentricity bounds.\n    im_filt = area_ecc_filter(im_label, area_bounds, ecc_bounds)\n\n    # Remove objects touching the border.\n    im_border = skimage.segmentation.clear_border(im_filt, buffer_size=5)\n\n    # Relabel the image.\n    im_border = im_border > 0\n    im_label = skimage.measure.label(im_border)\n\n    return im_label\n\n\ndef bg_subtract(im, radius):\n    \"\"\"\n    Subtracts a gaussian blurred image from itself smoothing uneven\n    illumination.\n\n    Parameters\n    ----------\n    im : 2d-array\n        Image to be subtracted\n    radius : int or float\n        Radius of gaussian blur\n\n    Returns\n    -------\n    im_sub : 2d-array, float\n        Background subtracted image.\n    \"\"\"\n\n    # Apply the gaussian filter.\n    im_filt = skimage.filters.gaussian(im, radius)\n\n    # Ensure the original image is a float\n    if np.max(im) > 1.0:\n        im = skimage.img_as_float(im)\n\n    im_sub = im - im_filt\n\n    return im_sub\n\n\ndef area_ecc_filter(im, area_bounds, ecc_bounds):\n    \"\"\"\n    Filters objects in an image based on their areas.\n\n    Parameters\n    ----------\n    im : 2d-array, int\n        Labeled segmentation mask to be filtered.\n    area_bounds : tuple of ints\n        Range of areas in which acceptable objects exist. This should be\n        provided in units of square pixels.\n    ecc_bounds : tuple of floats\n        Range of eccentricities in which acceptable objects exist. This should be\n        provided on the range of 0 to 1.0.\n\n    Returns\n    -------\n    im_relab : 2d-array, int\n        The relabeled, filtered image.\n    \"\"\"\n\n    # Extract the region props of the objects.\n    props = skimage.measure.regionprops(im)\n\n    # Extract the areas and labels.\n    areas = np.array([prop.area for prop in props])\n    eccs = np.array([prop.eccentricity for prop in props])\n    labels = np.array([prop.label for prop in props])\n\n    # Make an empty image to add the approved cells.\n    im_approved = np.zeros_like(im)\n\n    # Threshold the objects based on area and eccentricity\n    for i, _ in enumerate(areas):\n        if areas[i] > area_bounds[0] and areas[i] < area_bounds[1]\\\n        and eccs[i] > ecc_bounds[0] and eccs[i] < ecc_bounds[1]:\n            im_approved += im==labels[i]\n\n    # Relabel the image.\n    print(np.sum(im_approved))\n    im_filt = skimage.measure.label(im_approved > 0)\n\n    return im_filt\n\n\n# Load an E. 
coli test image.\necoli = skimage.io.imread('data/HG105_images/noLac_phase_0004.tif')\n\n# Using my knowledge of biology, we can draw some bounds.\n# Using the information in the problem statement, we know\n# the interpixel distance.\nip_dist = 0.0636 # in units of µm per pixel.\narea_bounds = (1/ip_dist**2, 10.0/ip_dist**2)\necc_bounds = (0.8, 1.0) # they are certainly not spheres.\n\n# Pass all images through our function.\necoli_seg = cell_segmenter(ecoli, area_bounds=area_bounds, ecc_bounds=ecc_bounds)\n\n# Extract and store the mean and total fluorescence intensities for each cell\n# in a single image in an array of pandas DataFrame\n\n# Load the fluorescence image.\necoli_yfp = skimage.io.imread('data/HG105_images/noLac_FITC_0004.tif')\n\n# Compute the regionproperties of our fluorescence image.\nprops = skimage.measure.regionprops(ecoli_seg, intensity_image = ecoli_yfp)\n\n# Extract the mean intensities\nmean_int = np.array([prop.mean_intensity for prop in props])\n\n# We will start with a simple histogram\nf1 = plt.figure(1)\nplt.hist(mean_int)\nplt.xlabel('mean pixel intensity')\nplt.ylabel('count')\n\n# To eliminate the bias, check ethe ECDF.\ndef ecdf(data):\n \"\"\" Compute x, y values for an empirical distribution function.\"\"\"\n x = np.sort(data)\n y = np.arange(1,len(data)+1) / len(data)\n return x, y\n\n# Compute the ECDF for the glow-y cells.\nintensities, ECDF = ecdf(mean_int)\n\n# Plotting\nf2 = plt.figure(2)\nplt.plot(intensities, ECDF, marker ='.', linestyle='none')\nplt.xlabel('intensities')\nplt.ylabel('ECDF')\n\n# Define the number of repetitions.\nn_reps = 100000\n\n# Initialize the replicates\nbootstrap_means = np.empty(n_reps)\n\n# Compute the replicates. Each bootstrap is plotted\nfor i in range(n_reps):\n resample = np.random.choice(mean_int, replace=True, size=len(mean_int))\n bootstrap_means[i] = np.mean(resample)\n\n# Compute the ECDF\nbs_means, bs_ECDF = ecdf(bootstrap_means)\n\n# Plot the ECDF\nf3 = plt.figure(3)\nplt.plot(bs_means, bs_ECDF, marker='.', linestyle='none')\nplt.xlabel('mean of bootstrapped intensities')\nplt.ylabel('ECDF')\nplt.margins(0.02)\n\n# Compute the 95% confidence interval\npercs = np.percentile(bootstrap_means, [97.5, 2.5])\nprint(\"\"\"\nThe 97.5% and the 2.5% of the bootstrapped data are {0:.3f}\nand {1:.3f}, respectively.\n\"\"\".format(percs[0], percs[1]))\n", "sub_path": "image_proc_practice2.py", "file_name": "image_proc_practice2.py", "file_ext": "py", "file_size_in_byte": 7303, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "seaborn.set", "line_number": 17, "usage_type": "call"}, {"api_name": "skimage.io.morphology.square", "line_number": 56, "usage_type": "call"}, {"api_name": "skimage.io.morphology", "line_number": 56, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 56, "usage_type": "name"}, {"api_name": "skimage.io.filters.median", "line_number": 57, "usage_type": "call"}, {"api_name": "skimage.io.filters", "line_number": 57, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 57, "usage_type": "name"}, {"api_name": "skimage.io.filters.threshold_otsu", "line_number": 64, "usage_type": "call"}, {"api_name": "skimage.io.filters", "line_number": 64, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 64, "usage_type": "name"}, {"api_name": "skimage.io.measure.label", "line_number": 76, "usage_type": "call"}, {"api_name": "skimage.io.measure", "line_number": 76, "usage_type": "attribute"}, 
{"api_name": "skimage.io", "line_number": 76, "usage_type": "name"}, {"api_name": "skimage.io.segmentation.clear_border", "line_number": 82, "usage_type": "call"}, {"api_name": "skimage.io.segmentation", "line_number": 82, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 82, "usage_type": "name"}, {"api_name": "skimage.io.measure.label", "line_number": 86, "usage_type": "call"}, {"api_name": "skimage.io.measure", "line_number": 86, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 86, "usage_type": "name"}, {"api_name": "skimage.io.filters.gaussian", "line_number": 110, "usage_type": "call"}, {"api_name": "skimage.io.filters", "line_number": 110, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 113, "usage_type": "call"}, {"api_name": "skimage.io.img_as_float", "line_number": 114, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 114, "usage_type": "name"}, {"api_name": "skimage.io.measure.regionprops", "line_number": 143, "usage_type": "call"}, {"api_name": "skimage.io.measure", "line_number": 143, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 160, "usage_type": "call"}, {"api_name": "skimage.io.measure.label", "line_number": 161, "usage_type": "call"}, {"api_name": "skimage.io.measure", "line_number": 161, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 161, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 167, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 167, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 167, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 183, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 183, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 183, "usage_type": "name"}, {"api_name": "skimage.io.measure.regionprops", "line_number": 186, "usage_type": "call"}, {"api_name": "skimage.io.measure", "line_number": 186, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 221, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "184930065", "text": "import discord\nfrom discord.ext import commands\nfrom datetime import date, datetime\n\n\n# Class handles commands related to console players\nclass ConsoleCommands(commands.Cog, name=\"Console Commands\"):\n \"\"\"Console Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n # Returns a list of embeds of console players so they can store their Paladins id's in the bot\n @commands.command(name='console', pass_context=True, ignore_extra=False, aliases=[\"Console\"])\n @commands.cooldown(3, 30, commands.BucketType.user)\n async def console(self, ctx, player_name, platform: str):\n async with ctx.channel.typing():\n platform = platform.lower()\n if platform == \"xbox\":\n platform = \"10\"\n elif platform == \"ps4\":\n platform = \"9\"\n elif platform == \"switch\":\n platform = \"22\"\n else:\n await ctx.send(\"```Invalid platform name. Valid platform names are:\\n1. Xbox\\n2. PS4\\n3. Switch```\")\n return None\n\n # players = paladinsAPI.getPlayerId(player_name, \"steam\")\n # players = paladinsAPI.getPlayerId(player_name, platform)\n\n players = self.bot.paladinsAPI.searchPlayers(player_name)\n\n if not players:\n await ctx.send(\"Found `0` players with the name `{}`.\".format(player_name))\n return None\n\n # Hi-Rez endpoint down.\n if players is None:\n await ctx.send(\"A Hi-Rez endpoint is down meaning this command won't work. \"\n \"Please don't try again for a while and give Hi-Rez a few hours to get the \"\n \"endpoint online again.\")\n return None\n\n players = [player for player in players if player.playerName.lower() == player_name.lower() and\n player['portal_id'] == platform]\n num_players = len(players)\n if num_players > 20: # Too many players...we must match case exactly\n await ctx.send(\"Found `{}` players with the name `{}`. 
Switching to case sensitive mode...\"\n .format(num_players, player_name))\n players = [player for player in players if player.playerName == player_name and\n player['portal_id'] == platform]\n num_players = len(players)\n await ctx.send(\"Found `{}` players with the name `{}`.\"\n .format(num_players, player_name))\n if num_players > 20:\n await ctx.send(\"```There are too many players with the name {}:\\n\\nPlease look on PaladinsGuru to \"\n \"find the Player ID```https://paladins.guru/search?term={}&type=Player\"\n .format(player_name, player_name))\n return None\n\n ss = \"\"\n recent_player = []\n for player in players:\n ss += str(player) + \"\\n\"\n player = self.bot.paladinsAPI.getPlayer(player=player.playerId)\n\n current_date = date.today()\n current_time = datetime.min.time()\n today = datetime.combine(current_date, current_time)\n last_seen = player.lastLoginDatetime\n last_seen = (today - last_seen).days\n\n # only add players seen in the last 90 days\n if last_seen <= 90:\n recent_player.append(player)\n\n await ctx.send(\"Found `{}` recent player(s) `(seen in the last 90 days)`\".format(len(recent_player)))\n for player in recent_player:\n current_date = date.today()\n current_time = datetime.min.time()\n today = datetime.combine(current_date, current_time)\n last_seen = player.lastLoginDatetime\n last_seen = (today - last_seen).days\n\n if last_seen <= 0:\n last_seen = \"Today\"\n else:\n last_seen = \"{} days ago\".format(last_seen)\n\n embed = discord.Embed(\n title=player.playerName,\n description=\"↓↓↓ Player ID ↓↓↓```fix\\n{}```\".format(player.playerId),\n colour=discord.colour.Color.dark_teal(),\n )\n embed.add_field(name='Last Seen:', value=last_seen, inline=True)\n embed.add_field(name='Account Level:', value=player.accountLevel, inline=True)\n embed.add_field(name='Hours Played:', value=player.hoursPlayed, inline=True)\n embed.add_field(name='Account Created:', value=player.createdDatetime, inline=True)\n await ctx.send(embed=embed)\n\n # Returns an embed of how to format a console name\n @commands.command(name='console_name')\n async def usage(self, ctx):\n embed = discord.Embed(\n title=\"How to format your console name in PaladinsAssistant.\",\n colour=discord.Color.dark_teal(),\n description=\"\\u200b\"\n )\n\n embed.add_field(name=\"To use a console name you must provide your name and platform surrounded in quotes.\",\n value=\"So for example a console player with the name `zombie killer` who plays on the \"\n \"`Switch` would type their name as follows in the stats command.\\n\\n\"\n \"`>>stats \\\"Zombie Killer Switch\\\"`\\n\\u200b\", inline=False)\n\n embed.add_field(\n name=\"Now if you want to make your life easier I would recommend storing/linking your name to the \"\n \"PaladinsAssistant.\",\n value=\"You can do this by using the `>>console` command to look up your Paladins `player_id` and then\"\n \"using the `>>store` command by doing `>>store your_player_id`. Then in commands you can just use \"\n \"the word `me` in place of your console name and platform.\\n\\u200b\", inline=False)\n\n embed.add_field(name=\"Below are the 3 steps (`with a picture`) of what you need to do if you are directed\"\n \" to use Guru's site to find a console `player_id from the console command.`\",\n value=\"```md\\n\"\n \"1. Use the link generated from the command or go to https://paladins.guru/ and type \"\n \"in the console player's name and then search.\\n\"\n \"2. Locate the account that you want and click on the name.\\n\"\n \"3. 
Then copy the number right before the player name.\\n\"\n \"4. Congrats you now have the console's players magical number.\\n```\", inline=False)\n\n embed.set_thumbnail(\n url=\"https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Androxus.png\")\n embed.set_image(\n url=\"https://raw.githubusercontent.com/EthanHicks1/PaladinsAssistantBot/master/assets/Console.png\")\n embed.set_footer(text=\"If you still have questions feel free to message me @ FeistyJalapeno#9045. \"\n \"I am a very busy but will try to respond when I can.\")\n\n await ctx.send(embed=embed)\n\n\n# Add this class to the cog list\ndef setup(bot):\n bot.add_cog(ConsoleCommands(bot))\n", "sub_path": "cogs/Console.py", "file_name": "Console.py", "file_ext": "py", "file_size_in_byte": 7432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 7, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 7, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.datetime.min.time", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.min", "line_number": 69, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 80, "usage_type": "name"}, {"api_name": "datetime.datetime.min.time", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime.min", "line_number": 81, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 91, "usage_type": "call"}, {"api_name": "discord.colour.Color.dark_teal", "line_number": 94, "usage_type": "call"}, {"api_name": "discord.colour", "line_number": 94, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "discord.ext.commands.BucketType", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 105, "usage_type": "call"}, {"api_name": "discord.Color.dark_teal", "line_number": 107, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 107, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 103, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "519502740", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 17 13:49:30 2018\r\n\r\n@author: pydea\r\n\"\"\"\r\n\r\nimport scipy.io as sio\r\nimport imageio as imgio\r\nimport numpy as np\r\nfrom numpy.random import rand\r\nfrom keras.preprocessing.image import 
ImageDataGenerator\r\nimport dlib\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# This function returns the K augmented images of just the face area, as well\r\n# as the extracted dlib features of these augmented images\r\n# augmentation_num is the number of augmented images to create\r\n\r\ndef augment_and_extract_features(new_image_filename,augmentation_num):\r\n \r\n ## FIND THE FACE IN THE INPUT IMAGE ---------------------------------------------------------------------\r\n \r\n #new_image_filename = './new_img/Peter_Deaville/Peter_Deaville_0001.jpg'\r\n #Predictor path is for finding the face in the image -- will skip this step since we are using cropped images\r\n predictor_path = './dlib_models/shape_predictor_5_face_landmarks.dat'\r\n facerec_path = './dlib_models/dlib_face_recognition_resnet_model_v1.dat'\r\n \r\n detector = dlib.get_frontal_face_detector()\r\n sp = dlib.shape_predictor(predictor_path)\r\n \r\n # Detect the faces in the image\r\n im = imgio.imread(new_image_filename)\r\n dets = detector(im, 1)\r\n if len(dets) == 0: # If it detects no faces, default to using the whole thing\r\n d = dlib.rectangle(0,0,im.shape[0],im.shape[1])\r\n print('WARNING NO FACES FOUND')\r\n else: # If it detects a face, get the shape and just do augmentation on the first detected face\r\n d = dets[0]\r\n \r\n # Record the coordinates of where the face is\r\n np_shape = np.asarray([d.top(), d.bottom(), d.left(), d.right()])\r\n shape = sp(im,d)\r\n \r\n # now downsize the image to the recovered coordinates\r\n im_just_face = np.asarray(im[np_shape[0]:np_shape[1],np_shape[2]:np_shape[3]])\r\n\r\n#im_just_face = np.reshape(im_just_face, [1, im_just_face.shape[0], im_just_face.shape[1], im_just_face.shape[2]])\r\n\r\n## AUGMENT THE INPUT IMAGE ---------------------------------------------------------------------\r\n\r\n# train_datagen_obj = ImageDataGenerator(\r\n# rescale=1./255,\r\n# rotation_range=20,\r\n# width_shift_range=0.2,\r\n# height_shift_range=0.2,\r\n# horizontal_flip=True,\r\n# #shear_range=0.3,\r\n# zoom_range=0.3,\r\n# fill_mode='nearest')\r\n# \r\n# train_generator = train_datagen_obj.flow(\r\n# im_just_face,\r\n# batch_size=64)\r\n# \r\n# aug_results = np.zeros([augmentation_num, 1, im_just_face.shape[1], im_just_face.shape[2], 3])\r\n# \r\n facerec = dlib.face_recognition_model_v1(facerec_path)\r\n base_features = facerec.compute_face_descriptor(im, shape)\r\n aug_results_features = np.zeros((augmentation_num,128))\r\n for i in range(augmentation_num):\r\n aug_results_features[i,:] = base_features + rand(1,128)/30\r\n\r\n#aug_results = np.squeeze(aug_results)\r\n\r\n## RUN DLIB ON THE AUGMENTED RESULTS ---------------------------------------------------------------------\r\n# f = np.zeros(128)\r\n# facerec = dlib.face_recognition_model_v1(facerec_path)\r\n# aug_results_features = np.zeros([128,augmentation_num])\r\n# d = dlib.rectangle(0,0,im_just_face.shape[1],im_just_face.shape[2]) \r\n# for i in range(augmentation_num):\r\n# shape = sp(aug_results[i,:,:,:], d)\r\n# face_desc = facerec.compute_face_descriptor(aug_results[i,:,:,:], shape)\r\n# # save this face descriptor object in a numpy array\r\n# for j in range(128):\r\n# f[j] = face_desc[j]\r\n# # append to features array, same index as the original array\r\n# aug_results_features[:,i] = f\r\n# \r\n return aug_results_features\r\n", "sub_path": "MATLAB_GUI_peters_copy/img_augment_extract.py", "file_name": "img_augment_extract.py", "file_ext": "py", "file_size_in_byte": 3745, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "dlib.get_frontal_face_detector", "line_number": 30, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 31, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "dlib.rectangle", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}, {"api_name": "dlib.face_recognition_model_v1", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "81113855", "text": "from django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, render\n\nfrom .forms import ContactForm, OrderEventForm, OrderContactForm\n\n\ndef contactform(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n sender = form.cleaned_data['sender']\n cc_myself = form.cleaned_data['cc_myself']\n recipients = ['support@openslides.org']\n if cc_myself:\n recipients.append(sender)\n from django.core.mail import send_mail\n send_mail(subject, message, sender, recipients)\n return HttpResponseRedirect(reverse('thankscontact'))\n else:\n form = ContactForm()\n return render(request, 'contact-form.html', {\n 'form': form,\n })\n\n\ndef orderform(request, package):\n if request.method == 'POST':\n form_event = OrderEventForm(request.POST)\n form_contact = OrderContactForm(request.POST)\n if form_event.is_valid() and form_contact.is_valid():\n # event\n event_name = form_event.cleaned_data['event_name']\n event_date = form_event.cleaned_data['event_date']\n event_location = form_event.cleaned_data['event_location']\n event_participants = form_event.cleaned_data['event_participants']\n # contact\n contact_organisation = form_contact.cleaned_data['contact_organisation']\n contact_name = form_contact.cleaned_data['contact_name']\n contact_phone = form_contact.cleaned_data['contact_phone']\n contact_email = form_contact.cleaned_data['contact_email']\n contact_street = form_contact.cleaned_data['contact_street']\n contact_postcode = form_contact.cleaned_data['contact_postcode']\n contact_location = form_contact.cleaned_data['contact_location']\n message = form_contact.cleaned_data['message']\n # mail\n recipients = ['emanuel@intevation.de']\n message = \"Neue Anfrage: OpenSlides Paket #%s\\n\\n\"\\\n \"Veranstaltungsname: %s\\n\"\\\n \"Veranstaltungszeitraum: %s\\n\"\\\n \"Veranstaltungsort: %s\\n\"\\\n \"Erwartete Teilnehmer: %s\\n\\n\"\\\n \"Organisation: %s\\n\"\\\n \"Ansprechpartner: %s\\n\"\\\n \"Telefon: %s\\n\"\\\n \"E-Mail: %s\\n\"\\\n \"Strasse: %s\\n\"\\\n \"PLZ: %s\\n\"\\\n \"Ort: %s\\n\\n\"\\\n \"Nachricht: %s\\n\"\\\n % (package, event_name, event_date, event_location,\n event_participants,\n contact_organisation, contact_name, contact_phone, contact_email,\n contact_street, contact_postcode, contact_location,\n message)\n from django.core.mail import send_mail\n send_mail(\"Anfrage OpenSlides-Supportpaket\", message, contact_email, recipients)\n return HttpResponseRedirect(reverse('thanksorder'))\n\n else:\n form_event = OrderEventForm()\n form_contact = OrderContactForm()\n return render(request, 
'order-form.html', {\n 'form_event': form_event,\n 'form_contact': form_contact,\n 'package': package,\n })\n", "sub_path": "openslides_website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "forms.ContactForm", "line_number": 10, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 20, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 21, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 21, "usage_type": "call"}, {"api_name": "forms.ContactForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "forms.OrderEventForm", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.OrderContactForm", "line_number": 32, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 69, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 70, "usage_type": "call"}, {"api_name": "forms.OrderEventForm", "line_number": 73, "usage_type": "call"}, {"api_name": "forms.OrderContactForm", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "339440724", "text": "from django.conf.urls.defaults import *\nfrom django.views.generic import ListView\nfrom historiador.models import Registro\n\t\nurlpatterns = patterns('',\n\turl(r'^$',\n ListView.as_view(\n queryset = Registro.objects.order_by('id'),\n context_object_name = 'latest_list', \n template_name = 'historiador/index.html')), #read\n)", "sub_path": "historiador/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.generic.ListView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 7, "usage_type": "name"}, {"api_name": "historiador.models.Registro.objects.order_by", "line_number": 8, "usage_type": "call"}, {"api_name": "historiador.models.Registro.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "historiador.models.Registro", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "386289771", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.forms import ModelForm, HiddenInput\nfrom Plan_aula.models import PlanA\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\n\n\nclass PlanAForm(ModelForm):\n class Meta:\n model = PlanA\n fields = ['uc', 'evento', 'ch', 'obj', 'docente', 'user']\n widgets = {'user': HiddenInput()}\n\n\ndef PlanA_list(request, template_name='Plan_aula/plana_list.html'):\n if request.user.id == 1:\n planlist = PlanA.objects.all()\n paginator = Paginator(planlist, 5) # paginação, Show 3 contacts per page\n page = request.GET.get('page')\n plana = paginator.get_page(page)\n else:\n planlist = PlanA.objects.filter(user=request.user)\n paginator = Paginator(planlist, 5) # paginação, Show 3 contacts per page\n page = request.GET.get('page')\n plana = paginator.get_page(page)\n data = {'object_list': plana}\n return render(request, template_name, 
data)\n\n\ndef PlanA_create(request, template_name='Plan_aula/plana_form.html'):\n form = PlanAForm(request.POST or None)\n form.fields['user'].initial = request.user.id\n if form.is_valid():\n form.save()\n return redirect('Plan_aula:plan_list')\n return render(request, template_name, {'form': form})\n\ndef PlanA_update(request, pk, template_name='Plan_aula/plana_form.html'):\n plana = get_object_or_404(PlanA, pk=pk)\n form = PlanAForm(request.POST or None, instance=plana)\n if form.is_valid():\n form.save()\n return redirect('Plan_aula:plan_list')\n return render(request, template_name, {'form':form})\n\ndef PlanA_delete(request, pk, template_name='Plan_aula/plana_confirm_delete.html'):\n plana = get_object_or_404(PlanA, pk=pk)\n if request.method=='POST':\n plana.delete()\n return redirect('Plan_aula:plan_list')\n return render(request, template_name, {'object':plana})\n", "sub_path": "Plan_aula/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "name"}, {"api_name": "Plan_aula.models.PlanA", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 12, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "Plan_aula.models.PlanA", "line_number": 17, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 18, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Plan_aula.models.PlanA", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 39, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA", "line_number": 39, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 47, "usage_type": "call"}, {"api_name": "Plan_aula.models.PlanA", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "601726073", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport sys\nimport urllib.request\n\nEXHIBITOR_API = os.getenv('ZK_API')\nif not EXHIBITOR_API:\n print(\"Missing environment variable [ZK_API].\")\n sys.exit(1)\n\n\ndef get_cluster_list():\n url = EXHIBITOR_API + '/cluster/list'\n try:\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n code = response.getcode()\n content = response.readall().decode('utf-8')\n response.close()\n if code != 200:\n 
print('ERROR Received unexpected status code from Exhibitor: [{}]'.format(code))\n exit(1)\n return json.loads(content)\n except Exception as e:\n print('ERROR Failed sending request to Exhibitor [{}]: {}'.format(url, e))\n exit(1)\n\n\ncluster_list = get_cluster_list()\nservers = cluster_list['servers']\nport = cluster_list['port']\n\noutput = ','.join(map(lambda x: x + ':' + str(port), servers))\n# Output list of currently active Zookeeper servers\nprint(output)\n", "sub_path": "scripts/get_zk_servers.py", "file_name": "get_zk_servers.py", "file_ext": "py", "file_size_in_byte": 1042, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 19, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 19, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "421951663", "text": "import discord\r\nfrom discord.ext import commands, tasks\r\nfrom discord.ext.commands.core import has_permissions\r\nfrom discord.utils import get\r\nfrom discord.ext.tasks import loop\r\nfrom discord import Embed\r\nimport sqlite3\r\nimport datetime\r\nimport asyncio\r\nimport time\r\nimport sys\r\nimport yaml\r\nimport random\r\nimport string\r\nfrom math import ceil\r\n\r\ninfos = 0\r\n\r\nwith open(\"config.yml\", \"r\", encoding='utf8') as ymlfile:\r\n cfg = yaml.load(ymlfile)\r\n\r\n\r\nprefix = cfg[\"bot_config\"][\"prefix\"]\r\nclient = commands.Bot(command_prefix = prefix)\r\nclient.remove_command('help')\r\nTOKEN = str(os.environ.get('BOT_TOKEN'))\r\n\r\nconnection = sqlite3.connect(\"bdd.db\")\r\ncursor = connection.cursor()\r\n\r\n@client.command()\r\nasync def debug(ctx):\r\n if ctx.message.author.guild_permissions.administrator:\r\n await update_counter(ctx.guild)\r\n await ctx.send(\":white_check_mark: Compteurs mis à jour\")\r\n else:\r\n await ctx.send(\":x:\")\r\n\r\n@client.command()\r\nasync def counter(ctx):\r\n if ctx.message.author.guild_permissions.administrator:\r\n # Créer / Supprimer / Liste\r\n await ctx.send(\":one: Veuillez envoyer l'action que vous voulez faire (**créer** un nouveau compteur, **supprimer** un compteur existant ou avoir la **liste** des compteurs existants) :\")\r\n while True:\r\n user = ctx.author.id\r\n channel = ctx.message.channel\r\n try:\r\n msg = await client.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=240)\r\n content = msg.content\r\n if \"cancel\" in content:\r\n await ctx.send(\"Compteur annulée...\")\r\n return\r\n if \"réer\" in content:\r\n action = 0\r\n await ctx.send(\":one: Très bien, nous allons **créer** un nouveau compteur !\")\r\n break\r\n elif \"upprimer\" in content:\r\n action = 1\r\n await ctx.send(\":one: Très bien, nous allons **supprimer** un compteur !\")\r\n break\r\n elif \"ist\" in content:\r\n action = 2\r\n await ctx.send(\":one: Très bien, je vais envoyer une **liste** des compteurs !\")\r\n break\r\n else:\r\n await ctx.send(\":x: 
Action inconnue, réessayez !\")\r\n continue\r\n except asyncio.TimeoutError():\r\n await ctx.send(\"Vous avez mis trop de temps à répondre, création annulée...\")\r\n return\r\n check = cursor.execute(f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{ctx.guild.id}_counters'\")\r\n check = int(check.fetchone()[0])\r\n if check == 0:\r\n cursor.execute(f\"CREATE TABLE '{ctx.guild.id}_counters' (type INTEGER, channel_name TEXT, channel_id INTEGER, role_id INTEGER)\")\r\n connection.commit()\r\n if action == 1:\r\n await ctx.send(\":x: Il n'y a aucun compteur à supprimer\")\r\n return\r\n #-----CREER-----#\r\n if action == 0:\r\n # Type\r\n await ctx.send(\":two: Veuillez envoyer le type de compteur (le numéro) \\n**1**. Compteur de membres \\n**2**. Compteur de bots \\n**3**. Compteur de membres avec un certain rôle \\n**4**. Compteur de salons\")\r\n while True:\r\n user = ctx.author.id\r\n channel = ctx.message.channel\r\n try:\r\n msg = await client.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=240)\r\n content = msg.content\r\n if \"cancel\" in content:\r\n await ctx.send(\"Compteur annulée...\")\r\n return\r\n if \"1\" in content: #membres\r\n counter_type = 1\r\n await ctx.send(\":two: Très bien, ce sera un **Compteur de membres**\")\r\n break\r\n elif \"2\" in content: #bots\r\n counter_type = 2\r\n await ctx.send(\":two: Très bien, ce sera un **Compteur de bots**\")\r\n break\r\n elif \"3\" in content: #membres (role)\r\n counter_type = 3\r\n await ctx.send(\":two: Très bien, ce sera un **Compteur de membres avec un certain rôle**\")\r\n break\r\n elif \"4\" in content: #salons\r\n counter_type = 4\r\n await ctx.send(\":two: Très bien, ce sera un **Compteur de salons**\")\r\n break\r\n else:\r\n await ctx.send(\":x: Type inconnu, réessayez !\")\r\n continue\r\n except asyncio.TimeoutError():\r\n await ctx.send(\"Vous avez mis trop de temps à répondre, création annulée...\")\r\n return\r\n # Nom du salon\r\n await ctx.send(\":three: Veuillez envoyer le nom que vous voulez pour le salon (*exemple :* **Bots :**)\")\r\n while True:\r\n user = ctx.author.id\r\n channel = ctx.message.channel\r\n try:\r\n msg = await client.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=240)\r\n content = msg.content\r\n if \"cancel\" in content:\r\n await ctx.send(\"Compteur annulée...\")\r\n return\r\n channel_name = content\r\n await ctx.send(f\":two: Très bien, ce salon sera nommé **{channel_name}**\")\r\n break\r\n except asyncio.TimeoutError():\r\n await ctx.send(\"Vous avez mis trop de temps à répondre, création annulée...\")\r\n return\r\n # Role (si compteur de membres avec un certain rôle)\r\n if counter_type == 3:\r\n await ctx.send(\":four: Veuillez mentionner le rôle pour le compteur\")\r\n while True:\r\n user = ctx.author.id\r\n channel = ctx.message.channel\r\n try:\r\n msg = await client.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=240)\r\n content = msg.content\r\n if \"cancel\" in content:\r\n await ctx.send(\"Compteur annulée...\")\r\n return\r\n try:\r\n role_id = content\r\n role_id = role_id.replace(\"<\", \"\")\r\n role_id = role_id.replace(\"@\", \"\")\r\n role_id = role_id.replace(\"&\", \"\")\r\n role_id = role_id.replace(\">\", \"\")\r\n role_id = int(role_id)\r\n role = discord.utils.get(ctx.guild.roles, id=role_id)\r\n if not role:\r\n await ctx.send(\":x: Veuillez mentionner 
un rôle valide !\")\r\n continue\r\n else:\r\n await ctx.send(f\":four: Très bien, le rôle du compteur sera **{role.name}**\")\r\n break\r\n except:\r\n await ctx.send(\":x: Veuillez mentionner un rôle valide !\")\r\n continue\r\n except asyncio.TimeoutError():\r\n await ctx.send(\"Vous avez mis trop de temps à répondre, création annulée...\")\r\n return\r\n overwrites = {\r\n ctx.guild.default_role: discord.PermissionOverwrite(connect=False),\r\n }\r\n channel = await ctx.guild.create_voice_channel(channel_name, overwrites=overwrites)\r\n if counter_type == 3:\r\n cursor.execute(f'''INSERT INTO '{ctx.guild.id}_counters' VALUES ({counter_type}, \"{channel_name}\", {channel.id}, {role.id}) ''')\r\n connection.commit()\r\n else:\r\n cursor.execute(f'''INSERT INTO '{ctx.guild.id}_counters' (type, channel_name, channel_id) VALUES ({counter_type}, \"{channel_name}\", {channel.id}) ''')\r\n connection.commit()\r\n await update_counter(ctx.guild)\r\n await ctx.send(\":white_check_mark: Compteur créé (vous pouvez le déplacer dans la catégorie de votre choix !\")\r\n return\r\n #-----SUPPRIMER-----#\r\n elif action == 1:\r\n check = cursor.execute(f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{ctx.guild.id}_counters'\")\r\n check = int(check.fetchone()[0])\r\n if check == 0:\r\n await ctx.send(\":x: Il n'y a aucun compteur sur ce serveur\")\r\n return\r\n counters = cursor.execute(f\"SELECT channel_id FROM '{ctx.guild.id}_counters'\")\r\n counters = counters.fetchall()\r\n counters = [item for t in counters for item in t]\r\n # ID du compteur\r\n await ctx.send(\":two: Veuillez envoyer l'**ID** du compteur (visible dans la liste)\")\r\n while True:\r\n user = ctx.author.id\r\n channel = ctx.message.channel\r\n try:\r\n msg = await client.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=240)\r\n content = msg.content\r\n if \"cancel\" in content:\r\n await ctx.send(\"Compteur annulée...\")\r\n return\r\n try:\r\n counter_id = int(content)\r\n channel_id = int(counters[counter_id-1])\r\n except:\r\n await ctx.send(\":x: Veuillez entrer un ID correct\")\r\n continue\r\n await ctx.send(f\":two: Suppression du compteur n°**{counter_id}**\")\r\n break\r\n except asyncio.TimeoutError():\r\n await ctx.send(\"Vous avez mis trop de temps à répondre, création annulée...\")\r\n return\r\n channel_name = cursor.execute(f\"SELECT channel_name FROM '{ctx.guild.id}_counters' WHERE channel_id={channel_id}\")\r\n channel_name = channel_name.fetchone()[0]\r\n cursor.execute(f\"DELETE FROM '{ctx.guild.id}_counters' WHERE channel_id={channel_id}\")\r\n connection.commit()\r\n channel = discord.utils.get(ctx.guild.voice_channels, id=channel_id)\r\n await channel.delete()\r\n await ctx.send(f\":white_check_mark: Le salon **{channel_name}** (compteur n°**{counter_id}**) a été supprimé !\")\r\n await update_counter(ctx.guild)\r\n return\r\n #-----LISTE-----#\r\n elif action == 2:\r\n check = cursor.execute(f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{ctx.guild.id}_counters'\")\r\n check = int(check.fetchone()[0])\r\n if check == 0:\r\n await ctx.send(\":x: Il n'y a aucun compteur sur ce serveur\")\r\n return\r\n \r\n counters = cursor.execute(f\"SELECT type, channel_name FROM '{ctx.guild.id}_counters'\")\r\n counters = counters.fetchall()\r\n counters = [item for t in counters for item in t]\r\n i = 0\r\n final_send = []\r\n final_send.append(\"**Liste des compteurs du serveur :**\\n\")\r\n for e in 
range(ceil(len(counters)/2)):\r\n counter_type = int(counters[i])\r\n channel_name = counters[i+1]\r\n if counter_type == 1:\r\n counter_type = \"Compteur de membres\"\r\n elif counter_type == 2:\r\n counter_type = \"Compteur de bots\"\r\n elif counter_type == 3:\r\n counter_type = \"Compteur de membres avec un certain rôle\"\r\n elif counter_type == 4:\r\n counter_type = \"Compteur de salons\"\r\n final_send.append(f'''- ID : **{e+1}** | Type : **{counter_type}** | Nom : **{channel_name}**\\n''')\r\n i += 2\r\n final_send = ''.join(final_send)\r\n await ctx.send(final_send)\r\n \r\n#-----EVENTS-----#\r\n\r\n@client.event\r\nasync def on_guild_channel_delete(channel):\r\n await update_counter(channel.guild)\r\n\r\n@client.event\r\nasync def on_guild_channel_create(channel):\r\n await update_counter(channel.guild)\r\n\r\n@client.event\r\nasync def on_guild_channel_update(before, after):\r\n async for entry in after.guild.audit_logs(limit=1, action=discord.AuditLogAction.channel_update):\r\n entry = entry \r\n if entry.user != client.user:\r\n await update_counter(after.guild)\r\n \r\n@client.event\r\nasync def on_member_join(member):\r\n await update_counter(member.guild)\r\n\r\n@client.event\r\nasync def on_member_remove(member):\r\n await update_counter(member.guild)\r\n\r\n@client.event\r\nasync def on_member_update(before, after):\r\n await update_counter(after.guild)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nasync def update_counter(guild: discord.guild.Guild):\r\n check = cursor.execute(f\"SELECT count(*) FROM sqlite_master WHERE type='table' AND name='{guild.id}_counters'\")\r\n check = int(check.fetchone()[0])\r\n if check != 0:\r\n counters = cursor.execute(f\"SELECT type, channel_name, channel_id FROM '{guild.id}_counters'\")\r\n counters = counters.fetchall()\r\n counters = [item for t in counters for item in t]\r\n i = 0\r\n for e in range(ceil(len(counters)/3)):\r\n counter_type = int(counters[i])\r\n if counter_type == 1:\r\n members_list = []\r\n for member in guild.members:\r\n if not member.bot:\r\n members_list.append(member)\r\n channel = discord.utils.get(guild.voice_channels, id=int(counters[i+2]))\r\n name = f\"{counters[i+1]} {len(members_list)}\"\r\n if channel.name != name:\r\n await channel.edit(name=name)\r\n elif counter_type == 2:\r\n bot_list = []\r\n for member in guild.members:\r\n if member.bot:\r\n bot_list.append(member)\r\n channel = discord.utils.get(guild.voice_channels, id=int(counters[i+2]))\r\n name = f\"{counters[i+1]} {len(bot_list)}\"\r\n if channel.name != name:\r\n await channel.edit(name=name)\r\n elif counter_type == 3:\r\n role_id = cursor.execute(f\"SELECT role_id FROM '{guild.id}_counters' WHERE channel_id={counters[i+2]}\")\r\n role_id = int(role_id.fetchone()[0])\r\n role = discord.utils.get(guild.roles, id=role_id)\r\n role_list = []\r\n for member in guild.members:\r\n if role in member.roles:\r\n role_list.append(member)\r\n role_list = len(role_list)\r\n channel = discord.utils.get(guild.voice_channels, id=int(counters[i+2]))\r\n name = f\"{counters[i+1]} {role_list}\"\r\n if channel.name != name:\r\n await channel.edit(name=name)\r\n elif counter_type == 4:\r\n channel = discord.utils.get(guild.voice_channels, id=int(counters[i+2]))\r\n list_channels = len(guild.channels)\r\n name = f\"{counters[i+1]} {list_channels}\"\r\n if channel.name != name:\r\n await channel.edit(name=name)\r\n i += 3\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n status = cfg[\"bot_config\"][\"status\"]\r\n game = discord.Game(name=status)\r\n await 
client.change_presence(status=discord.Status.online, activity=game)\r\n print(f\"Bot connecté en tant que : {client.user.name}\")\r\n\r\n\r\n\r\nclient.run(TOKEN)\r\n", "sub_path": "counter.py", "file_name": "counter.py", "file_ext": "py", "file_size_in_byte": 16278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "yaml.load", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 24, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 68, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 111, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 128, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 150, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 150, "usage_type": "attribute"}, {"api_name": "asyncio.TimeoutError", "line_number": 160, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 164, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 205, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 212, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 212, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 231, "usage_type": "call"}, {"api_name": "discord.AuditLogAction", "line_number": 259, "usage_type": "attribute"}, {"api_name": "discord.guild", "line_number": 282, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 290, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 297, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 297, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 306, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 306, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 313, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 313, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 319, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 319, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 324, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 324, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 336, "usage_type": "call"}, {"api_name": "discord.Status", "line_number": 337, "usage_type": "attribute"}]} +{"seq_id": "616631332", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom githubspider.items import GitspiderItem\n\nclass GtcrawlSpider(scrapy.Spider):\n name = 'gtcrawl'\n allowed_domains = ['github.com']\n start_urls = ('https://github.com/shiyanlou?page={}&tab=repositories'.format(i) for i in range(1,5))\n\n def parse(self, response):\n for rep in response.xpath('//div[@id=\"user-repositories-list\"]//li'):\n \titem = GitspiderItem()\n \titem['name'] = rep.xpath('.//h3/a/text()').extract_first().strip()\n \titem['update_time'] = rep.xpath('.//relative-time/@datetime').extract_first()\n \tres_url = response.urljoin(rep.xpath('.//@href').extract_first())\n \trequest = scrapy.Request(res_url,callback=self.parse_res)\n \trequest.meta['item'] = item\n \tyield request\n def parse_res(self,response):\n \titem = 
response.meta['item']\n \titem['commits'] = ''.join(response.xpath('//li[@class=\"commits\"]/a/span/text()').extract_first(default='0').strip().split(','))\n \titem['branches'] = response.xpath('//li[2]/a/span/text()').extract_first(default='0').strip()\n \titem['releases'] = response.xpath('//li[3]/a/span/text()').extract_first(default='110').strip()\n \tyield item\n", "sub_path": "challenge19/githubspider/githubspider/spiders/gtcrawl.py", "file_name": "gtcrawl.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "githubspider.items.GitspiderItem", "line_number": 12, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "202798795", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 2 13:26:03 2021\n\n@author: JZ2018\n\"\"\"\n\nimport lxml\nimport time\nimport requests\nimport pandas as pd\nimport datetime\nfrom re import findall\nfrom copy import copy\nfrom random import randint\nfrom numpy import where\n\n#import functions from source file\n#execfile('D:/JZP/dl_funs.py')\n\n#function to create url for downloading aeso electricity data\ndef aeso_url_gen(tablename, contenttype, startdate, enddate, dateformat = None):\n #convert start and end dates to url format\n startdate = dateconv(startdate, dateformat)\n enddate = dateconv(enddate, dateformat)\n url = f'http://ets.aeso.ca/ets_web/ip/Market/Reports/{tablename}ReportServlet?beginDate={startdate}&endDate={enddate}&contentType={contenttype}'\n #time.sleep(3)\n print('download url generated: ' + url)\n return url\n\n#function to convert y-m-d date string to mdy in url generation\ndef dateconv(mydate, dateformat = None):\n #check if mydate is already in datetime format, convert if not\n if not isinstance(mydate, datetime.datetime) and dateformat is not None: \n mydate = datetime.datetime.strptime(mydate, dateformat)\n\n #create string formatted for url date input\n yr = str(mydate.year)\n mo = '0' + str(mydate.month) if mydate.month < 10 else str(mydate.month)\n d = '0' + str(mydate.day) if mydate.day < 10 else str(mydate.day)\n output = mo + d + yr\n print('converted date string from ' + str(mydate) + ' to datetime value ' + output)\n return output\n\n#function to download and pre process dataframe from aeso\ndef aeso_download_one(url):\n #get data into pandas df from processed url\n download_raw = pd.read_html(url)\n df = range_dl_combine(download_raw)\n #flatten multi index column names into single column names\n if len(df.columns.names)>1:\n cols = ''\n for l in range(0, len(df.columns.names)):\n if l == 0:\n cols = df.columns.get_level_values(l)\n else:\n cols = cols +'_'+df.columns.get_level_values(l)\n df.columns = cols\n else:\n cols = df.columns\n #convert date columns to datetime format\n datecols = [n for n in cols if len(findall('(?i)date', n))>0]\n for dc in datecols:\n #convert hour 24 to hour 00 for pandas to_datetime conversion\n df[dc] = df[dc].str[:-2]+where(df[dc].str[-2:]=='24', '00', df[dc].str[-2:])\n #convert column to date time\n df[dc] = df[dc].str.replace('*', '')\n df[dc] = df[dc].apply(pd.to_datetime)\n return df\n\n#function to download data for timerange\ndef aeso_download_range(tablename, contenttype, startdate, enddate, dateformat):\n #convert start/end to date time format\n startdate = datetime.datetime.strptime(startdate, dateformat)\n enddate = 
datetime.datetime.strptime(enddate, dateformat)\n \n #trim end date if it's greater than current datetime\n if enddate > datetime.datetime.now():\n enddate = copy(datetime.datetime.now())\n \n #calculate number of days from start to end\n dayrange = (enddate - startdate).days\n #remove combine_df object if exists\n if 'combine_df' in locals() or 'combine_df' in globals():\n del combine_df\n \n #pull directly if range less than 30 days\n if dayrange <=30:\n print('date range <= 30 days, downloading')\n url = aeso_url_gen(tablename, contenttype, startdate, enddate, dateformat) \n combine_df = aeso_download_one(url)\n else:\n #create list of start/end dates in 30 day intervals\n dl_ranges = dayrange_parse(startdate, enddate)\n\n #loop through list of start/end dates and pull each one, append together into combine_df\n for r in range(0,len(dl_ranges)):\n range_start = dl_ranges[r][0]\n range_end = dl_ranges[r][1]\n url = aeso_url_gen(tablename, contenttype, range_start, range_end, dateformat) \n df = aeso_download_one(url)\n if 'combine_df' in locals() or 'combine_df' in globals():\n combine_df = combine_df.append(df, ignore_index = True)\n else:\n combine_df = copy(df)\n print(str(range_start) + 'downloaded')\n sleeptime = randint(3,9)\n print('sleep for ' + str(sleeptime) + ' seconds')\n #time.sleep(sleeptime)\n \n return combine_df\n\n#parse start/end range into list of start/end dates in 30 day intervals\ndef dayrange_parse(startdate, enddate):\n r_start = copy(startdate)\n out_list = []\n while r_start < enddate:\n if r_start + datetime.timedelta(days=30) > enddate:\n r_end = copy(enddate)\n else:\n r_end = r_start + datetime.timedelta(days=30)\n out_list.append([r_start, r_end])\n r_start = copy(r_end) + datetime.timedelta(days=1)\n if r_start >= enddate:\n break \n print('date range > 30 days, parse into ' + str(len(out_list)) + ' number of downloads')\n return out_list\n\n\ndef range_dl_combine(download_raw):\n for d in range(0, len(download_raw)):\n df_rows = download_raw[d].shape[0]\n if df_rows > 3:\n tempdf = download_raw[d]\n if 'combine_df' in locals() or 'combine_df' in globals():\n combine_df = combine_df.append(tempdf, ignore_index = True)\n else:\n combine_df = tempdf\n else:\n next\n \n output = combine_df.copy() \n if 'combine_df' in locals() or 'combine_df' in globals():\n del combine_df\n \n return output\n\n\n# =============================================================================\n# #input parameters\n# tablename = 'DailyAveragePoolPrice'\n# startdate = '2015-01-01'\n# enddate = '2021-03-31'\n# dateformat = '%Y-%m-%d'\n# contenttype = 'html'\n# #url = aeso_url_gen(tablename, contenttype, startdate, enddate, dateformat) \n# #df = aeso_download_one(url)\n# #pd.options.display.max_columns = df.shape[1]\n# #df.describe(include='all')\n# #dr = aeso_download_range(startdate, enddate, dateformat)\n# \n# #get table of downloaded data, sort by date\n# final_df = aeso_download_range(tablename, contenttype, startdate, enddate, dateformat)\n# final_df = final_df.sort_values(by='$/MWh_Date').reset_index()\n# final_df.to_csv('D:/JZP/test.csv')\n# =============================================================================\n\n\n", "sub_path": "electricity_scrape.py", "file_name": "electricity_scrape.py", "file_ext": "py", "file_size_in_byte": 6365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime", "line_number": 34, "usage_type": "attribute"}, {"api_name": 
"datetime.datetime.strptime", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.read_html", "line_number": 48, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 68, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 105, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 118, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 121, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "558308078", "text": "import numpy as np\nimport sys\nfrom netCDF4 import Dataset\n\n''' ReadFingerprint.py\n\nProvides a function that reads in a fingerprint data file from the netCDF files created\nfrom the original spherical harmonics files.\n\nParameters:\nfname = File name that contains the lat/lon and fingerprint information\n\nReturn:\nf = The fingerprint coefficient along a lat/lon grid [nlat x nlon]\nlat = Vector of latitudes\nlon = Vector of longitudes\n\n'''\n\ndef ReadFingerprint(fname):\n\n\t# Open the fingerprint file\n\ttry:\n\t\tnc_fid = Dataset(fname, 'r')\n\texcept:\n\t\tprint(\"Cannot open fingerprint file: {0}\\n\".format(fname))\n\t\traise\n\t\n\t# Read in the fingerprint data\n\tfp = nc_fid.variables['fp'][:,:]\n\tfp_lats = nc_fid.variables['lat'][:]\n\tfp_lons = nc_fid.variables['lon'][:]\n\n\treturn(fp, fp_lats, fp_lons)\n\t", "sub_path": "modules/ipccar5/icesheets/ReadFingerprint.py", "file_name": "ReadFingerprint.py", "file_ext": "py", "file_size_in_byte": 785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "netCDF4.Dataset", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "333924565", "text": "\"\"\"Tests for Terraform.\"\"\"\nimport glob\nimport os\n\nfrom send2trash import send2trash\n\nfrom integration_tests.integration_test import IntegrationTest\nfrom integration_tests.util import copy_file, execute_tests, import_tests\n\n\nclass Terraform(IntegrationTest):\n \"\"\"Base class for all Terraform testing.\"\"\"\n\n base_dir = os.path.abspath(os.path.dirname(__file__))\n template_dir = os.path.join(base_dir, 'templates')\n\n tf_test_dir = os.path.join(base_dir, 'terraform_test.tf')\n tf_state_dir = os.path.join(base_dir, 'tf_state.cfn')\n\n def 
copy_runway(self, template):\n \"\"\"Copy runway template to proper directory.\"\"\"\n template_file = os.path.join(self.template_dir, 'runway-{}.yml'.format(template))\n copy_file(template_file, os.path.join(self.base_dir, 'runway.yml'))\n\n def copy_template(self, template, name='main.tf'):\n \"\"\"Copy template to Terraform module folder.\"\"\"\n template_file = os.path.join(self.template_dir, template)\n copy_file(template_file, os.path.join(self.tf_test_dir, name))\n\n def clean(self):\n \"\"\"Clean up Terraform module directory.\"\"\"\n file_types = ('*.tf', '.terraform-version', '*.yml', 'local_backend')\n templates = []\n for file_type in file_types:\n templates.extend(glob.glob(os.path.join(self.tf_test_dir, file_type)))\n templates.extend(glob.glob(os.path.join(self.base_dir, file_type)))\n\n for template in templates:\n if os.path.isfile(template):\n self.logger.debug('send2trash: \"%s\"', template)\n send2trash(template)\n folders = ('.terraform', 'terraform.tfstate.d')\n for folder in folders:\n folder_path = os.path.join(self.tf_test_dir, folder)\n if os.path.isdir(folder_path):\n self.logger.debug('send2trash: \"%s\"', folder_path)\n send2trash(folder_path)\n\n def set_tf_version(self, version=11):\n \"\"\"Copy version file to module directory.\"\"\"\n version_file = 'tf-v{}.version'.format(version)\n self.copy_template(version_file, '.terraform-version')\n\n def run(self):\n \"\"\"Find all Terraform tests and run them.\"\"\"\n import_tests(self.logger, self.tests_dir, 'test_*')\n self.set_environment('dev')\n self.set_env_var('CI', '1')\n tests = [test(self.logger, self.environment)\n for test in Terraform.__subclasses__()]\n if not tests:\n raise Exception('No tests were found.')\n self.logger.debug('FOUND TESTS: %s', tests)\n err_count = execute_tests(tests, self.logger)\n assert err_count == 0 # assert that all subtests were successful\n return err_count\n\n def teardown(self):\n \"\"\"Teardown resources create during init.\"\"\"\n self.clean()\n", "sub_path": "integration_tests/test_terraform/test_terraform.py", "file_name": "test_terraform.py", "file_ext": "py", "file_size_in_byte": 2812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "integration_tests.integration_test.IntegrationTest", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "integration_tests.util.copy_file", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"integration_tests.util.copy_file", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "send2trash.send2trash", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "send2trash.send2trash", "line_number": 47, "usage_type": "call"}, {"api_name": "integration_tests.util.import_tests", "line_number": 56, "usage_type": "call"}, {"api_name": "integration_tests.util.execute_tests", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "126709428", "text": "import pathlib\n\nfile_name = \"07.txt\"\ncurrent_dir = pathlib.Path(__file__).parent.absolute()\nfile_path = pathlib.Path(current_dir / \"data\" / file_name)\n\nwith open(file_path, \"r\") as file:\n bags = file.readlines()\n\nall_bags = {}\nfor bag in bags:\n outer, inner = bag.split(\"contain\")\n outer = ' '.join(outer.split()[:2])\n inner_dict = {}\n inner_contents = inner.split(',')\n for content in inner_contents:\n try:\n number = int(content.split()[0])\n except ValueError:\n number = 0\n colour = ' '.join(content.split()[1:3])\n inner_dict[colour]=number\n if outer not in all_bags.keys():\n all_bags[outer] = inner_dict\n\n\ndef find_contents(bag_colour):\n if bag_colour == \"shiny gold\":\n return True\n if bag_colour == \"other bags.\":\n return None\n bag_contents = all_bags[bag_colour].keys()\n content_returns = []\n for colour in bag_contents:\n content_returns.append(find_contents(colour))\n return any(content_returns)\n \n\nprint(all_bags)\ntotal = 0\nfor colour in all_bags.keys():\n contents = find_contents(colour)\n if contents and colour !=\"shiny gold\":\n print(f\"{colour} : {all_bags[colour]} := {contents}\")\n total += 1\n \nprint(total)\n", "sub_path": "07_1.py", "file_name": "07_1.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 4, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "480745012", "text": "# Authors: \n# Trevor Perrin\n#\n# See the LICENSE file for legal information regarding use of this file.\n\n\"\"\"X.509 cert parsing, implemented using pure python.\"\"\"\nimport time\nimport calendar\nimport hashlib\nimport re\n\nfrom.errors import TLSUnsupportedError\nfrom .utils.asn1parser import ASN1Parser\nfrom .utils.keyfactory import _createPublicRSAKey\nfrom .utils.pem import *\nfrom .utils.cryptomath import *\n\nclass _X509(object):\n \"\"\"This class represents an X.509 certificate.\n\n @type x509: String\n @ivar x509: 
Either the original certificate or the converted binary\n \"\"\"\n\n def parse(self, s):\n \"\"\"Parse a PEM-encoded X.509 certificate.\n\n @type s: str\n @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded\n certificate wrapped with \"-----BEGIN CERTIFICATE-----\" and\n \"-----END CERTIFICATE-----\" tags).\n \"\"\"\n\n _bytes = dePem(s, \"CERTIFICATE\")\n self.parseBinary(_bytes)\n return self\n\n def parseBinary(self, _bytes):\n \"\"\"Parse a DER-encoded X.509 certificate.\n\n @type _bytes: str or L{bytearray} of unsigned _bytes\n @param _bytes: A DER-encoded X.509 certificate.\n \"\"\"\n self._bytes = bytearray(_bytes)\n p = ASN1Parser(_bytes)\n\n #Get the tbsCertificate\n tbsCertificateP = p.getChild(0)\n\n #Is the optional version field present?\n #This determines which index the key is at.\n if tbsCertificateP.value[0]==0xA0:\n subjectPublicKeyInfoIndex = 6\n else:\n subjectPublicKeyInfoIndex = 5\n\n #Get the subject\n self.subject = tbsCertificateP.getChildBytes(\\\n subjectPublicKeyInfoIndex - 1)\n\n #Get the subjectPublicKeyInfo\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\n subjectPublicKeyInfoIndex)\n\n #Get the algorithm\n algorithmP = subjectPublicKeyInfoP.getChild(0)\n rsaOID = algorithmP.value\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\n\n self.algorithm_oid = \"{ 1.2.840.113549.1.1.1 }\"\n self.algorithm = \"rsaEncryption\"\n\n #Get the subjectPublicKey\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\n\n #Adjust for BIT STRING encapsulation\n if (subjectPublicKeyP.value[0] !=0):\n raise SyntaxError()\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\n\n #Get the modulus and exponent\n modulusP = subjectPublicKeyP.getChild(0)\n publicExponentP = subjectPublicKeyP.getChild(1)\n\n #Decode them into numbers\n self.modulus = bytesToNumber(modulusP.value)\n self.publicExponent = bytesToNumber(publicExponentP.value)\n\n #Create a public key instance\n self._publicKey = _createPublicRSAKey(self.modulus, self.publicExponent)\n\n def getDER(self):\n \"Return the raw ASN.1 bytes for this certificate.\"\n return self._bytes\n\n def getPublicKeyInfo(self):\n \"Return information about this certificate's public key.\"\n return {\n 'algorithm_oid': self.algorithm_oid, \n 'algorithm': self.algorithm,\n 'modulus': self.modulus,\n 'public_exponent': self.publicExponent,\n 'key': self._publicKey,\n 'keylen': len(self._publicKey)\n }\n\n def extensions(self):\n raise TLSUnsupportedError(\"can't get extensions with this implementation\")\n\n def getVersion(self):\n \"Return integral version of this certificate.\"\n raise TLSUnsupportedError(\"can't get X509 version with this implementation\")\n\n def getNotBefore(self):\n raise TLSUnsupportedError(\"can't get effective dates with this implementation\")\n\n def getNotAfter(self):\n raise TLSUnsupportedError(\"can't get effective dates with this implementation\")\n\n def getIssuer(self):\n \"Return a dict with information about the certificate issuer.\"\n raise TLSUnsupportedError(\"can't get issuer with this implementation\")\n\n def getSubject(self):\n \"Return a dict with information about the certificate subject.\"\n raise TLSUnsupportedError(\"can't get subject with this implementation\")\n\n def getSignatureAlgorithm(self, as_oid):\n raise TLSUnsupportedError(\"can't get signature algorithm with this implementation\")\n\n def getSignatureValue(self):\n raise TLSUnsupportedError(\"can't get signature value with this 
implementation\")\n\n def getTBSCertificateData(self):\n \"Return the raw bytes of the ASN.1 DER tbsCertificate.\"\n raise TLSUnsupportedError(\"can't get tbsCertificate component with this implementation\")\n return der_encoder.encode(self.cert.getComponentByName('tbsCertificate'))\n\n def parseDigestInfo(self, data):\n \"\"\"Get the signature value field and decrypt and parse it to produce the\n digest info for this certificate.\"\"\"\n raise TLSUnsupportedError(\"can't parse DigestInfo with this implementation\")\n", "sub_path": "tlslite/x509simple.py", "file_name": "x509simple.py", "file_ext": "py", "file_size_in_byte": 5088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "utils.asn1parser.ASN1Parser", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.asn1parser.ASN1Parser", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.keyfactory._createPublicRSAKey", "line_number": 91, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 109, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 113, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 116, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 119, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 123, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 127, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 130, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 133, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 137, "usage_type": "call"}, {"api_name": "errors.TLSUnsupportedError", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "367851919", "text": "\nfrom celery import Celery\nimport os\nimport django\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"dailyfresh.settings\")\ndjango.setup()\n\napp = Celery('celery_tasks.tasks', broker='redis://127.0.0.1:6379/7')\n\n\n@app.task\ndef send_email_active(toemail, username, token):\n subject = '用户邮箱验证'\n message = ''\n sender = settings.EMAIL_FROM\n reciver = [toemail]\n html = '
<h1>%s,恭喜你成为会员</h1><br/><h3>请点击下面的网址完成验证并激活账号</h3><br/>' \\\n '<a href=\"http://127.0.0.1:8000/user/active/%s\">' \\\n 'http://127.0.0.1:8000/user/active/%s</a>' % (username, token, token)\n send_mail(subject,message,sender,reciver,html_message=html)\n\n # send_mail(邮件标题, 邮件文本, 发件人, 收件人列表, html_message=html邮件内容)\n\n", "sub_path": "celery_tasks/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ.setdefault", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 9, "usage_type": "call"}, {"api_name": "celery.Celery", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_FROM", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "102552011", "text": "# -*- coding: utf-8 -*-\n__author__ = 'guenther@eberl.se'\n\nimport locale\nlocale.setlocale(locale.LC_ALL, '')\ncode = locale.getpreferredencoding()\n\n# Version included in standard Python runtime seems not to work on Windows.\n# Unclear if working on Linux/OS X, untested.\n\n# Current Windows version of curses installed from http://www.lfd.uci.edu/~gohlke/pythonlibs/#curses (module version 2.2).\n\n# Running a script only works from the terminal, not directly in PyCharm IDE: \"Redirection is not supported.\"\n\n# Documentation:\n# https://docs.python.org/2/library/curses.html\n\n# Tutorials:\n# https://de.wikibooks.org/wiki/Python_unter_Linux:_Curses\n# https://docs.python.org/2/howto/curses.html\n\nimport curses\nstandard_screen = curses.initscr() # returns window object that covers the entire screen.\n\n# Apply settings.\n# curses.echo() # show pushed buttons (only normal characters, doesn't work on arrow keys).\ncurses.noecho() # don't show pushed buttons.\ncurses.cbreak() # no line buffer = directly react on key presses, don't wait on return key press.\nstandard_screen.keypad(1) # activate escape sequences.\n\n# Recolor terminal.\ncurses.start_color()\ncurses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE) # white font on blue background.\ncurses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK) # yellow font on black background.\nstandard_screen.bkgd(curses.color_pair(1)) # use color pair 1 for standard screen.\nstandard_screen.refresh()\n\n# Write something to a window.\nwin = curses.newwin(5, 25, 5, 4) # (height, width, vertical position, horizontal position).\nwin.bkgd(curses.color_pair(2)) # use color pair 2 for window.\nwin.box() # draw box around window.\nwin.addstr(2, 2, 'Hello world <[()]>') # add a string (vertical position, horizontal position, text string).\nwin.refresh() # nothing appears on the screen until a refresh is called.\n\n# Write something to another window.\nwin_two = curses.newwin(3, 25, 5, 40)\nwin_two.bkgd(curses.color_pair(1))\nwin_two.box()\nwin_two.addstr(1, 2, 'Test 2 üöäß', curses.A_STANDOUT)\nwin_two.refresh()\n\n# Wait for user input.\nc = standard_screen.getch()\n\n# Reset everything and exit.\ncurses.nocbreak() # reactivate line buffer.\nstandard_screen.keypad(0)\ncurses.echo() # show pushed buttons.\ncurses.endwin() # exit.\n", "sub_path": "tryout_curses.py", "file_name": "tryout_curses.py", "file_ext": "py", "file_size_in_byte": 2276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": 
[{"api_name": "locale.setlocale", "line_number": 5, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 5, "usage_type": "attribute"}, {"api_name": "locale.getpreferredencoding", "line_number": 6, "usage_type": "call"}, {"api_name": "curses.initscr", "line_number": 23, "usage_type": "call"}, {"api_name": "curses.noecho", "line_number": 27, "usage_type": "call"}, {"api_name": "curses.cbreak", "line_number": 28, "usage_type": "call"}, {"api_name": "curses.start_color", "line_number": 32, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 33, "usage_type": "call"}, {"api_name": "curses.COLOR_WHITE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLUE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 34, "usage_type": "call"}, {"api_name": "curses.COLOR_YELLOW", "line_number": 34, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 34, "usage_type": "attribute"}, {"api_name": "curses.color_pair", "line_number": 35, "usage_type": "call"}, {"api_name": "curses.newwin", "line_number": 39, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 40, "usage_type": "call"}, {"api_name": "curses.newwin", "line_number": 46, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 47, "usage_type": "call"}, {"api_name": "curses.A_STANDOUT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "curses.nocbreak", "line_number": 56, "usage_type": "call"}, {"api_name": "curses.echo", "line_number": 58, "usage_type": "call"}, {"api_name": "curses.endwin", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "652194584", "text": "\"\"\"\nUse this script to create random subsets of fastq files both single and paired end.\n\"\"\"\nimport sys\nimport argparse\nimport random\n\ndef main(type, file, n, eq):\n\n #Function for progress_bar\n def progress_bar(count, total, complete=''):\n bar_len = 50\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n if complete == 'yes':\n sys.stdout.write('[%s] %s%s' % (bar, percents, '%') + ' ' * 20 + '\\n')\n sys.stdout.flush()\n else:\n sys.stdout.write('[%s] %s%s' % (bar, percents, '%') + ' ' * 20 + '\\r')\n sys.stdout.flush()\n\n #Randomization of fastq with partitioning stipulations\n if eq:\n num_line = []\n prog = 0\n for fl in file:\n with open(fl) as f:\n for i, l in enumerate(f):\n if i % 200000 == 0:\n sys.stdout.write('Determining number of reads in file... [%s]\\r' % '/-\\|'[prog % 4])\n sys.stdout.flush()\n prog += 1\n pass\n num_line.append(i+1)\n sys.stdout.write('Determining number of reads in file... 
Complete!\\n')\n\n #Check if same number of entries exist in paired-end\n if type == 'pe':\n if num_line[0] != num_line[1]:\n sys.stdout.write('Error: Paired-end files do not have equal number of reads!\\n')\n sys.exit()\n #Determine number of entries that will be partitioned\n num_entry = num_line[0]/4\n if num_entry < n:\n sys.stdout.write('Error: Number of entries cannot fit into desired number of partitions!\\n')\n sys.exit()\n num_entry_per_part = num_entry/n\n\n if type == 'pe':\n #Setup output files and completion dictionary\n completion = {}\n for partition in range(n):\n open('partition_'+str(partition)+'_read1.fastq','w')\n open('partition_'+str(partition)+'_read2.fastq','w')\n completion[partition] = 0\n\n #Begin parsing fastq files\n line = 1\n with open(file[0]) as read_1, open(file[1]) as read_2:\n while line <= num_entry_per_part * n:\n\n #Progress Bar\n progress_bar(line, num_entry)\n\n #Read lines\n r1 = read_1.next()\n r2 = read_2.next()\n\n #Iterate through sequence blocks within each file\n if r1[0] == '@':\n #Make sure that read_1 and read_2 are paired\n if r2[0] == '@':\n pass\n else:\n sys.stdout.write('Error: The fastq file do not seem to be paired!')\n sys.exit()\n #Retreive the lines within the block for read_1\n r1_seq = read_1.next()\n r1_ids = read_1.next()\n r1_qty = read_1.next()\n #Retreive the lines within the block for read_2\n r2_seq = read_2.next()\n r2_ids = read_2.next()\n r2_qty = read_2.next()\n\n #Partition selection\n part_select = random.randrange(0,n)\n while completion[part_select] == num_entry_per_part:\n part_select = random.randrange(0,n)\n if sum(completion.values()) == n * num_entry_per_part:\n break\n\n #Write fastq block to partition\n f = open('partition_'+str(part_select)+'_read1.fastq','a')\n for ln in [r1, r1_seq, r1_ids, r1_qty]:\n f.write(ln)\n f = open('partition_'+str(part_select)+'_read2.fastq','a')\n for ln in [r2, r2_seq, r2_ids, r2_qty]:\n f.write(ln)\n\n #Push num_entry by 1\n completion[part_select] += 1\n\n line += 1\n\n #Close all the files\n for partition in range(n):\n f = open('partition_'+str(partition)+'_read1.fastq','a')\n f.close()\n f = open('partition_'+str(partition)+'_read2.fastq','a')\n f.close()\n\n if type == 'se':\n # Setup output files and completion dictionary\n completion = {}\n for partition in range(n):\n open('partition_' + str(partition) + '.fastq', 'w')\n completion[partition] = 0\n\n # Begin parsing fastq files\n line = 1\n with open(file[0]) as read_1:\n while line <= num_entry:\n\n # Progress Bar\n progress_bar(line, num_entry)\n\n # Read lines\n r1 = read_1.next()\n\n # Iterate through sequence blocks within each file\n if r1[0] == '@':\n # Retreive the lines within the block for read_1\n r1_seq = read_1.next()\n r1_ids = read_1.next()\n r1_qty = read_1.next()\n\n # Partition selection\n part_select = random.randrange(0, n)\n while completion[part_select] == num_entry_per_part:\n part_select = random.randrange(0, n)\n if sum(completion.values()) == n * num_entry_per_part:\n break\n\n # Write fastq block to partition\n f = open('partition_' + str(part_select) + '.fastq', 'a')\n for ln in [r1, r1_seq, r1_ids, r1_qty]:\n f.write(ln)\n\n # Push num_entry by 1\n completion[part_select] += 1\n\n line += 1\n\n # Close all the files\n for partition in range(n):\n f = open('partition_' + str(partition) + '.fastq', 'a')\n f.close()\n\n else:\n pass\n\n progress_bar(1, 1, complete='yes')\n sys.stdout.write('Fastq randomization Complete!\\n')\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='Creates random subsets of fasta files, both single- and paired-end reads.',\n prog='fastq_randomize.py')\n optional = parser._action_groups.pop()\n required = parser.add_argument_group('required arguments')\n required.add_argument('-type', type=str, metavar='TYPE', help='type of fasta file; se = single-end, pe = paired-end', required=True)\n required.add_argument('-fl', type=str, metavar='FILE', nargs='+', help='path to fasta file, separate paired-end files by a space', required=True)\n optional.add_argument('-n', type=int, default=2, metavar='INT', help='specify number of subsets to be formed (default 2)')\n optional.add_argument('-eq', action='store_true', help='use this flag to force equal number of entries among subsets')\n parser._action_groups.append(optional)\n args = parser.parse_args()\n\n if args.type == 'se':\n if len(args.fl) > 1:\n parser.error('argument -fl: expected only one argument')\n else:\n pass\n elif args.type == 'pe':\n if len(args.fl) == 1:\n parser.error('argument -fl: expected two arguments')\n else:\n pass\n else:\n parser.error('argument -type: must use either \"se\" or \"pe\"')\n\n if args.n <= 1:\n parser.error('argument -n: must be >= 2')\n\n main(args.type, args.fl, args.n, args.eq)", "sub_path": "fastq_randomize.py", "file_name": "fastq_randomize.py", "file_ext": "py", "file_size_in_byte": 7958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.stdout.write", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 79, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 142, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 167, 
"usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 167, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "367091504", "text": "import sqlite3, json\nfrom sqlite3 import Error\n\nclass DatabaseManager():\n \n def __init__(self):\n self.tracked_games = []\n self.db_file_name = \"db/voxel.db\"\n self.connection = self.connect(self.db_file_name)\n\n def updategames(self):\n try:\n with open('server_config.json', 'r') as server_settings:\n games = server_settings['tracked_games']\n self.tracked_games = games\n return games\n except Exception as ex:\n print(ex)\n return False\n\n def games(self):\n return self.updategames()\n\n def connect(self, db_name):\n try:\n connection = sqlite3.connect(db_name)\n return connection\n except Error as e:\n print(e)\n except Exception as ex:\n print(ex)\n \n def insert(self, data):\n try:\n \n self.connection.cursor().execute('INSERT INTO data (game, date, time, time_played, bayid) VALUES (?,?,?,?,?)',[data['game'], data['date'], data['time'], data['session'], data['bayid']])\n self.connection.commit()\n return True\n except Exception as e:\n print(\"\")\n print(e)\n print(\"INSERT ERROR MESSAGE\")\n return False\n\n\ndef main():\n\n data = {\n \"bayid\":\"bay3\",\n \"date\": \"7-23-2019\",\n \"time\": \"11:04:00\",\n \"session\": 30,\n \"game\": \"Spotify.exe\"\n }\n\n manager = DatabaseManager()\n print(manager.insert(data))\n\n \n\n\nif __name__ == '__main__':\n main()\n \n", "sub_path": "DataBaseManager.py", "file_name": "DataBaseManager.py", "file_ext": "py", "file_size_in_byte": 1587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "212163283", "text": "from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Text\nfrom datetime import datetime\n\n\nfrom misc import dp\nfrom states import NewAdminMessage\nfrom keyboards import *\nfrom db import Schedule, User, _exist_user, get_admin_ids, _exist_schedule_to_group, get_user_by_user_id, \\\n\tget_all_schedule_by_day_of_week_by_group_id\n\n\nnumerator = True\n\n\n@dp.message_handler(commands=[\"menu\"])\nasync def menu(msg: types.Message):\n\tif msg.from_user.id in get_admin_ids():\n\t\tawait msg.answer(\"Открываю меню...\", reply_markup=admin_menu_kb)\n\telif _exist_user(msg.from_user.id):\n\t\tawait msg.answer(\"Открываю меню...\", reply_markup=menu_kb)\n\telse:\n\t\tawait msg.answer(\"Для начала, вам нужно зарегистироваться!\\nПропишите /start чтобы пройти регистрацию\",\n\t\t reply_markup=ReplyKeyboardRemove())\n\n\ndef get_mess_text(day: str, items: list[Schedule]) -> str:\n\tmess_text = f\"Расписание на {day}:\\n\"\n\tfor item in items:\n\t\tmess_text += f\"\\t{item.time}: {item.name}\"\n\t\tif item.cabinet != 0:\n\t\t\tmess_text += f\": {item.cabinet}\\n\"\n\t\telse:\n\t\t\tmess_text += \"\\n\"\n\treturn mess_text\n\n\ndef get_today_schedule(user: User):\n\tnow = datetime.now()\n\tdate = now.date()\n\tday_of_week = date.weekday()\n\treturn get_all_schedule_by_day_of_week_by_group_id(day_of_week, user.group_id, numerator)\n\n\ndef get_tomorrow_schedule(user: User):\n\tnow = datetime.now()\n\tdate = now.date()\n\tday_of_week = date.weekday()\n\treturn get_all_schedule_by_day_of_week_by_group_id(day_of_week + 1, user.group_id, 
numerator)\n\n\n@dp.message_handler(Text(equals=menu_text[0]))\nasync def schedule_today(msg: types.Message):\n\tuser = get_user_by_user_id(msg.from_user.id)\n\tif not _exist_schedule_to_group(user.group_id):\n\t\treturn await msg.answer(\"Упсс.. Похоже расписания для вашей группы еще нет ;(\")\n\tschedule = get_today_schedule(user)\n\tawait msg.answer(get_mess_text(\"сегодня\", schedule), parse_mode=types.ParseMode.HTML)\n\n\n@dp.message_handler(Text(equals=menu_text[1]))\nasync def schedule_tomorrow(msg: types.Message):\n\tuser = get_user_by_user_id(msg.from_user.id)\n\tif not _exist_schedule_to_group(user.group_id):\n\t\treturn await msg.answer(\"Упсс.. Похоже расписания для вашей группы еще нет ;(\")\n\tschedule = get_tomorrow_schedule(user)\n\tawait msg.answer(get_mess_text(\"завтра\", schedule), parse_mode=types.ParseMode.HTML)\n\n\n@dp.message_handler(Text(equals=menu_text[2]))\nasync def schedule_week(msg: types.Message):\n\tuser = get_user_by_user_id(msg.from_user.id)\n\tif not _exist_schedule_to_group(user.group_id):\n\t\treturn await msg.answer(\"Упсс.. Похоже расписания для вашей группы еще нет ;(\")\n\tawait msg.answer(\"Функция еще не реализована\")\n\n\n@dp.message_handler(Text(equals=menu_text[3]))\nasync def schedule_next_week(msg: types.Message):\n\tuser = get_user_by_user_id(msg.from_user.id)\n\tif not _exist_schedule_to_group(user.group_id):\n\t\treturn await msg.answer(\"Упсс.. Похоже расписания для вашей группы еще нет ;(\")\n\tawait msg.answer(\"Функция еще не реализована\")\n\t\n\t\n@dp.message_handler(Text(equals=menu_text[4]))\nasync def schedule_text_to_admin(msg: types.Message):\n\tawait msg.answer(\"Введите текст сообщения: \", reply_markup=ReplyKeyboardRemove())\n\tawait NewAdminMessage.text.set()\n\n\n@dp.message_handler(state=NewAdminMessage.text)\nasync def schedule_forward_to_admin(msg: types.Message, state: FSMContext):\n\tawait msg.forward(645798848)\n\tawait state.finish()\n\n\n@dp.message_handler(Text(equals=admin_menu_text[5]))\nasync def admin_menu(msg: types.Message):\n\tawait msg.answer(\"Открываю админ панель...\", reply_markup=admin_panel_kb)\n", "sub_path": "menu.py", "file_name": "menu.py", "file_ext": "py", "file_size_in_byte": 3927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "aiogram.types.Message", "line_number": 18, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 18, "usage_type": "name"}, {"api_name": "db.get_admin_ids", "line_number": 19, "usage_type": "call"}, {"api_name": "db._exist_user", "line_number": 21, "usage_type": "call"}, {"api_name": "misc.dp.message_handler", "line_number": 17, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 17, "usage_type": "name"}, {"api_name": "db.Schedule", "line_number": 28, "usage_type": "name"}, {"api_name": "db.User", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "db.get_all_schedule_by_day_of_week_by_group_id", "line_number": 43, "usage_type": "call"}, {"api_name": "db.User", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "db.get_all_schedule_by_day_of_week_by_group_id", "line_number": 50, "usage_type": "call"}, {"api_name": "aiogram.types.Message", 
"line_number": 54, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 54, "usage_type": "name"}, {"api_name": "db.get_user_by_user_id", "line_number": 55, "usage_type": "call"}, {"api_name": "db._exist_schedule_to_group", "line_number": 56, "usage_type": "call"}, {"api_name": "aiogram.types.ParseMode", "line_number": 59, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 59, "usage_type": "name"}, {"api_name": "misc.dp.message_handler", "line_number": 53, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 53, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 53, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 63, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 63, "usage_type": "name"}, {"api_name": "db.get_user_by_user_id", "line_number": 64, "usage_type": "call"}, {"api_name": "db._exist_schedule_to_group", "line_number": 65, "usage_type": "call"}, {"api_name": "aiogram.types.ParseMode", "line_number": 68, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 68, "usage_type": "name"}, {"api_name": "misc.dp.message_handler", "line_number": 62, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 62, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 62, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 72, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 72, "usage_type": "name"}, {"api_name": "db.get_user_by_user_id", "line_number": 73, "usage_type": "call"}, {"api_name": "db._exist_schedule_to_group", "line_number": 74, "usage_type": "call"}, {"api_name": "misc.dp.message_handler", "line_number": 71, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 71, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 71, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 80, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 80, "usage_type": "name"}, {"api_name": "db.get_user_by_user_id", "line_number": 81, "usage_type": "call"}, {"api_name": "db._exist_schedule_to_group", "line_number": 82, "usage_type": "call"}, {"api_name": "misc.dp.message_handler", "line_number": 79, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 79, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 79, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 88, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 88, "usage_type": "name"}, {"api_name": "states.NewAdminMessage.text.set", "line_number": 90, "usage_type": "call"}, {"api_name": "states.NewAdminMessage.text", "line_number": 90, "usage_type": "attribute"}, {"api_name": "states.NewAdminMessage", "line_number": 90, "usage_type": "name"}, {"api_name": "misc.dp.message_handler", "line_number": 87, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 87, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 87, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 94, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 94, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 94, "usage_type": "name"}, {"api_name": "misc.dp.message_handler", "line_number": 93, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 93, 
"usage_type": "name"}, {"api_name": "states.NewAdminMessage.text", "line_number": 93, "usage_type": "attribute"}, {"api_name": "states.NewAdminMessage", "line_number": 93, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 100, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 100, "usage_type": "name"}, {"api_name": "misc.dp.message_handler", "line_number": 99, "usage_type": "call"}, {"api_name": "misc.dp", "line_number": 99, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Text", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "407515589", "text": "import subprocess\nimport csv\nfrom datetime import datetime\n\nrounds_to_run = 100\n\n\n# header = [\n# 'scenario'\n# , 'players'\n# , 'strikes'\n# , 'runs'\n# , 'max_player_count'\n# , 'strikes_2_1'\n# , 'strikes_2_2'\n# , 'strikes_3_1'\n# , 'strikes_3_2'\n# , 'strikes_3_3'\n# , 'strikes_4_1'\n# , 'strikes_4_2'\n# , 'strikes_4_3'\n# , 'strikes_4_4'\n# , 'variablestring'\n# , 'avg_rounds'\n# , 'avg_games_per_round'\n# , 'total_meaningful_games'\n# , 'total_meaningful_games_rounded'\n# , 'extract_datetime'\n# ]\n\n\noutputfile = 'C:/Users/corey.hulse/Documents/Personal/FairStrike/strikeoutput.csv'\n\n# with open(outputfile, 'w', encoding='UTF8', newline=\"\") as f:\n# writer = csv.writer(f)\n#\n# writer.writerow(header)\n\n\nfilename = 'C:/Users/corey.hulse/Documents/Personal/FairStrike/strikescenarios.csv'\n\nwith open(filename, 'r') as csvfile:\n datareader = csv.reader(csvfile)\n next(csvfile)\n\n for row in datareader:\n scenario = row[0]\n scenario_file_name = scenario.replace(\" \", \"_\")\n outputfile = 'C:/Users/corey.hulse/Documents/Personal/FairStrike/strikeoutput_' + scenario_file_name + '.csv'\n p = row[1]\n s = row[2]\n r = rounds_to_run\n g = row[3]\n c21 = row[4]\n c22 = row[5]\n c31 = row[6]\n c32 = row[7]\n c33 = row[8]\n c41 = row[9]\n c42 = row[10]\n c43 = row[11]\n c44 = row[12]\n now = datetime.now()\n\n variablestring = \" -p\" + str(p) + \" -s\" + str(s) + \" -r\" + str(r) + \" -g\" + str(g) + \" -x -c \" + str(c21) + \" \" + str(c22) + \" \" + str(c31) + \" \" + str(c32) + \" \" + str(c33) + \" \" + str(c41) + \" \" + str(c42) + \" \" + str(c43) + \" \" + str(c44)\n\n variableoutput = str(p) + \",\" + str(s) + \",\" + str(r) + \",\" + str(g) + \",\" + str(c21) + \",\" + str(c22) + \",\" + str(c31) + \",\" + str(c32) + \",\" + str(c33) + \",\" + str(c41) + \",\" + str(c42) + \",\" + str(c43) + \",\" + str(c44) + \",\"\n\n location = \"C:/Users/corey.hulse/Documents/Personal/FairStrike/FairStrikeSimV2.exe\"\n\n subprocessrun = location + variablestring\n\n subprocess.call(subprocessrun, shell=True)\n subprocessoutput = subprocess.check_output(subprocessrun, shell=True)\n\n subprocessoutput = subprocessoutput.decode(\"utf-8\")\n\n subprocesssplit = subprocessoutput.split(\"\\t\")\n\n subprocessoutput = subprocessoutput.replace(\"\\t\", \",\")\n\n subprocesssplitround = round(float(subprocesssplit[3]))\n\n commaoutput = variableoutput + subprocessoutput\n\n # scenario_key = str(scenario) + \"_\" + str(s)\n #\n # if scenario_key = scenario_key_prior:\n # if subprocesssplitround >= subprocesssplitround_prior:\n # subprocesssplitroundstep = subprocesssplitround\n # else:\n # subprocesssplitroundstep = subprocesssplitround_prior\n\n\n #print(commaoutput)\n\n data = [\n scenario\n , p\n , s\n , r\n , g\n , c21\n , c22\n , c31\n , c32\n , c33\n , c41\n , c42\n , c43\n , c44\n , variablestring\n , subprocesssplit[1]\n , subprocesssplit[2]\n , 
float(subprocesssplit[3])\n , subprocesssplitround\n , now\n ]\n\n\n\n with open(outputfile, 'a', encoding='UTF8', newline=\"\") as f:\n writer = csv.writer(f)\n\n # write the data\n writer.writerow(data)\n\n # Key Prior Scenario\n scenario_key_prior = str(scenario) + \"_\" + str(s)\n\n subprocesssplitround_prior = round(float(subprocesssplit[3]))\n", "sub_path": "fairstrikesim/fairstrikesim.py", "file_name": "fairstrikesim.py", "file_ext": "py", "file_size_in_byte": 3565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 74, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "527241395", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef add_num(picPath, num):\n img = Image.open(picPath)\n xSize, ySize = img.size\n fontsize = int(ySize / 4)\n position = xSize - fontsize\n myFont = ImageFont.truetype('Symbol.ttf', size=fontsize)\n ImageDraw.Draw(img).text((position, 0), str(num), font=myFont, fill='red')\n img.save('icon_with_num.jpg')\n\n\nif __name__ == '__main__':\n picPath = \"test.jpg\"\n num = 3\n add_num(picPath, num)\n", "sub_path": "python/someexample/draw/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 12, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "304631995", "text": "import datetime\nimport logging\nimport os\nimport random\nimport re\nimport subprocess\nimport signal\nimport sys\nimport tempfile\nimport time\n\nimport luigi\nimport luigi.format\nimport luigi.hdfs\nfrom luigi import configuration\n\n\nlogger = logging.getLogger('luigi-interface')\n\n\"\"\"\nApache Spark on YARN support\n\nExample configuration section in client.cfg:\n\n[spark]\n# assembly jar containing spark and dependencies\nspark-jar: /usr/share/spark/jars/spark-assembly-0.8.1-incubating-hadoop2.2.0.jar\n\n# spark script to invoke\nspark-class: /usr/share/spark/spark-class\n\n# directory containing the (client side) configuration files for the hadoop cluster\nhadoop-conf-dir: /etc/hadoop/conf\n\n\"\"\"\n\n\nclass SparkRunContext(object):\n def __init__(self):\n self.app_id = None\n\n def __enter__(self):\n self.__old_signal = signal.getsignal(signal.SIGTERM)\n signal.signal(signal.SIGTERM, self.kill_job)\n return self\n\n def kill_job(self, captured_signal=None, stack_frame=None):\n if self.app_id:\n done = False\n while not done:\n try:\n logger.info('Job interrupted, killing application %s', self.app_id)\n subprocess.call(['yarn', 'application', '-kill', self.app_id])\n done = True\n except KeyboardInterrupt:\n continue\n\n if captured_signal is not 
None:\n # adding 128 gives the exit code corresponding to a signal\n sys.exit(128 + captured_signal)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is KeyboardInterrupt:\n self.kill_job()\n signal.signal(signal.SIGTERM, self.__old_signal)\n\n\nclass SparkJobError(RuntimeError):\n def __init__(self, message, out=None, err=None):\n super(SparkJobError, self).__init__(message, out, err)\n self.message = message\n self.out = out\n self.err = err\n\n\nclass SparkJob(luigi.Task):\n spark_workers = None\n spark_master_memory = None\n spark_worker_memory = None\n queue = luigi.Parameter(is_global=True, default=None, significant=False)\n temp_hadoop_output_file = None\n\n def requires_local(self):\n ''' Default impl - override this method if you need any local input to be accessible in init() '''\n return []\n\n def requires_hadoop(self):\n return self.requires() # default impl\n\n def input_local(self):\n return luigi.task.getpaths(self.requires_local())\n\n def input(self):\n return luigi.task.getpaths(self.requires())\n\n def deps(self):\n # Overrides the default implementation\n return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())\n\n def jar(self):\n raise NotImplementedError(\"subclass should define jar containing job_class\")\n\n def job_class(self):\n raise NotImplementedError(\"subclass should define Spark job_class\")\n\n def job_args(self):\n return []\n\n def output(self):\n raise NotImplementedError(\"subclass should define HDFS output path\")\n\n def run(self):\n original_output_path = self.output().path\n path_no_slash = original_output_path[:-2] if original_output_path.endswith('/*') else original_output_path\n path_no_slash = original_output_path[:-1] if original_output_path[-1] == '/' else path_no_slash\n tmp_output = luigi.hdfs.HdfsTarget(path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))\n\n args = ['org.apache.spark.deploy.yarn.Client']\n args += ['--jar', self.jar()]\n args += ['--class', self.job_class()]\n\n for a in self.job_args():\n if a == self.output().path:\n # pass temporary output path to job args\n logger.info(\"Using temp path: {0} for path {1}\".format(tmp_output.path, original_output_path))\n args += ['--args', tmp_output.path]\n else:\n args += ['--args', str(a)]\n\n if self.spark_workers is not None:\n args += ['--num-workers', self.spark_workers]\n\n if self.spark_master_memory is not None:\n args += ['--master-memory', self.spark_master_memory]\n\n if self.spark_worker_memory is not None:\n args += ['--worker-memory', self.spark_worker_memory]\n\n queue = self.queue\n if queue is not None:\n args += ['--queue', queue]\n\n env = os.environ.copy()\n env['SPARK_JAR'] = configuration.get_config().get('spark', 'spark-jar')\n env['HADOOP_CONF_DIR'] = configuration.get_config().get('spark', 'hadoop-conf-dir')\n env['MASTER'] = 'yarn-client'\n spark_class = configuration.get_config().get('spark', 'spark-class')\n\n temp_stderr = tempfile.TemporaryFile()\n logger.info('Running: %s %s' % (spark_class, ' '.join(args)))\n proc = subprocess.Popen([spark_class] + args, stdout=subprocess.PIPE,\n stderr=temp_stderr, env=env, close_fds=True)\n\n return_code, final_state, app_id = self.track_progress(proc)\n if return_code == 0 and final_state != 'FAILED':\n tmp_output.move(path_no_slash)\n elif final_state == 'FAILED':\n raise SparkJobError('Spark job failed: see yarn logs for %s' % app_id)\n else:\n temp_stderr.seek(0)\n errors = temp_stderr.readlines()\n logger.error(errors)\n raise SparkJobError('Spark 
job failed', err=errors)\n\n def track_progress(self, proc):\n # The Spark client currently outputs a multiline status to stdout every second\n # while the application is running. This instead captures status data and updates\n # a single line of output until the application finishes.\n app_id = None\n app_status = 'N/A'\n url = 'N/A'\n final_state = None\n start = time.time()\n with SparkRunContext() as context:\n while proc.poll() is None:\n s = proc.stdout.readline()\n app_id_s = re.compile('application identifier: (\\w+)').search(s)\n if app_id_s:\n app_id = app_id_s.group(1)\n context.app_id = app_id\n app_status_s = re.compile('yarnAppState: (\\w+)').search(s)\n if app_status_s:\n app_status = app_status_s.group(1)\n url_s = re.compile('appTrackingUrl: (.+)').search(s)\n if url_s:\n url = url_s.group(1)\n final_state_s = re.compile('distributedFinalState: (\\w+)').search(s)\n if final_state_s:\n final_state = final_state_s.group(1)\n if not app_id:\n logger.info(s.strip())\n else:\n elapsed_mins, elapsed_secs = divmod(datetime.timedelta(seconds=time.time() - start).seconds, 60)\n status = '[%0d:%02d] Status: %s Tracking: %s' % (elapsed_mins, elapsed_secs, app_status, url)\n sys.stdout.write(\"\\r\\x1b[K\" + status)\n sys.stdout.flush()\n logger.info(proc.communicate()[0])\n return proc.returncode, final_state, app_id\n", "sub_path": "luigi/contrib/spark.py", "file_name": "spark.py", "file_ext": "py", "file_size_in_byte": 7188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "signal.getsignal", "line_number": 43, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 43, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 44, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 44, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 65, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 65, "usage_type": "attribute"}, {"api_name": "luigi.Task", "line_number": 76, "usage_type": "attribute"}, {"api_name": "luigi.Parameter", "line_number": 80, "usage_type": "call"}, {"api_name": "luigi.task.getpaths", "line_number": 91, "usage_type": "call"}, {"api_name": "luigi.task", "line_number": 91, "usage_type": "attribute"}, {"api_name": "luigi.task.getpaths", "line_number": 94, "usage_type": "call"}, {"api_name": "luigi.task", "line_number": 94, "usage_type": "attribute"}, {"api_name": "luigi.task.flatten", "line_number": 98, "usage_type": "call"}, {"api_name": "luigi.task", "line_number": 98, "usage_type": "attribute"}, {"api_name": "luigi.hdfs.HdfsTarget", "line_number": 116, "usage_type": "call"}, {"api_name": "luigi.hdfs", "line_number": 116, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 116, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 143, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 143, "usage_type": "attribute"}, {"api_name": "luigi.configuration.get_config", "line_number": 144, "usage_type": "call"}, {"api_name": "luigi.configuration", "line_number": 144, "usage_type": "name"}, {"api_name": "luigi.configuration.get_config", "line_number": 145, "usage_type": "call"}, {"api_name": "luigi.configuration", "line_number": 145, "usage_type": 
"name"}, {"api_name": "luigi.configuration.get_config", "line_number": 147, "usage_type": "call"}, {"api_name": "luigi.configuration", "line_number": 147, "usage_type": "name"}, {"api_name": "tempfile.TemporaryFile", "line_number": 149, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 151, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 173, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 177, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 181, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 184, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 187, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 193, "usage_type": "call"}, {"api_name": "time.time", "line_number": 193, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 195, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 195, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 196, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 196, "usage_type": "attribute"}]} +{"seq_id": "654360417", "text": "from bs4 import BeautifulSoup\r\nimport requests\r\nimport openpyxl\r\nimport requests \r\n\r\nimport json\r\nfrom types import SimpleNamespace\r\n\r\nmy_path = \"Documents/Australian_All_Postcodes_11022021.xlsx\"\r\npostcode_wb_obj = openpyxl.load_workbook(my_path)\r\npostcode_sheet_obj = postcode_wb_obj.active\r\npostcode_max_col = postcode_sheet_obj.max_column\r\npostcode_max_row = postcode_sheet_obj.max_row\r\n\r\nmy_path = \"Documents/DonutKing_AUS_06052021.xlsx\"\r\nwb_obj_w = openpyxl.load_workbook(my_path)\r\nsheet_obj_w = wb_obj_w.active\r\n\r\nclass OBJ:\r\n Title = \"\"\r\n Address = \"\"\r\n FullAddress = \"\"\r\n Suburb = \"\"\r\n State = \"\"\r\n City = \"\"\r\n Country = \"\"\r\n Postcode = \"\"\r\n Latitude = \"\"\r\n Longitude = \"\"\r\n\r\nlistOBJ = []\r\nlistError = []\r\n\r\nfor x in range(2, postcode_max_row+1):\r\n URL = 'https://www.donutking.com.au/wp/wp-admin/admin-ajax.php?action=store_search&lat=' + str(postcode_sheet_obj.cell(row=x, column=3).value) + '&lng=' + str(postcode_sheet_obj.cell(row=x, column=4).value) + '&max_results=50&search_radius=500&autoload=1'\r\n try:\r\n print(URL)\r\n\r\n response = requests.get(URL).json()\r\n\r\n if response.status_code == 200:\r\n\r\n for y in range(len(response)):\r\n try:\r\n x = response[y]\r\n\r\n obj = OBJ()\r\n obj.Title = str(x['store'])\r\n obj.Address = str(x['address'])\r\n\r\n try:\r\n obj.FullAddress = str(x['address2'])\r\n except:\r\n obj.FullAddress = ''\r\n\r\n obj.City = str(x['city'])\r\n obj.State = str(x['state'])\r\n obj.Postcode = str(x['zip'])\r\n obj.Country = str(x['country'])\r\n obj.Latitude = str(x['lat'])\r\n obj.Longitude = str(x['lng'])\r\n\r\n print(obj.Title + \" | \" + obj.Address + \" | \" + str(obj.Latitude) + \" | \" + str(obj.Longitude))\r\n\r\n result = False\r\n\r\n if len(listOBJ) > 0:\r\n for i in range(len(listOBJ)):\r\n if (str(obj.Title) == str(listOBJ[i].Title) and str(obj.Address) == str(listOBJ[i].Address) and str(obj.Latitude) == str(listOBJ[i].Latitude) and str(obj.Longitude) == str(listOBJ[i].Longitude)):\r\n result = True\r\n break\r\n\r\n if result == False:\r\n listOBJ.append(obj)\r\n\r\n\r\n except:\r\n continue\r\n\r\n except : \r\n print(\"Exception: \" + URL)\r\n listError.append(URL)\r\n continue\r\n\r\n\r\nj = 0\r\n\r\nfor z in 
range(len(listOBJ)):\r\n j = j + 1\r\n print(listOBJ[z].Title +\" | \"+listOBJ[z].Address +\" | \"+ str(listOBJ[z].Latitude) +\" | \"+ str(listOBJ[z].Longitude))\r\n sheet_obj_w.cell(row = j, column = 1).value = str(listOBJ[z].Title)\r\n sheet_obj_w.cell(row = j, column = 2).value = str(listOBJ[z].Address)\r\n sheet_obj_w.cell(row = j, column = 3).value = str(listOBJ[z].FullAddress)\r\n sheet_obj_w.cell(row = j, column = 4).value = str(listOBJ[z].Country)\r\n sheet_obj_w.cell(row = j, column = 5).value = str(str(listOBJ[z].Latitude))\r\n sheet_obj_w.cell(row = j, column = 6).value = str(str(listOBJ[z].Longitude))\r\n wb_obj_w.save(\"Documents/DonutKing_AUS_06052021.xlsx\")\r\n\r\nj = j + 10\r\n\r\nif(len(listError) > 0):\r\n sheet_obj_w.cell(row=j, column=1).value = 'ERROR'\r\n\r\n for z in listError:\r\n j = j + 1\r\n sheet_obj_w.cell(row=j, column=1).value = str(z)\r\n wb_obj_w.save(\"Documents/DonutKing_AUS_06052021.xlsx\")", "sub_path": "DonutKing_AUS_06052021.py", "file_name": "DonutKing_AUS_06052021.py", "file_ext": "py", "file_size_in_byte": 3660, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 10, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "66264430", "text": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Any, Iterable\nfrom uuid import UUID\n\nimport anyio\nimport attrs\nimport sniffio\nimport tenacity\nfrom sqlalchemy import and_, bindparam, or_, select\nfrom sqlalchemy.engine import URL, Result\nfrom sqlalchemy.exc import IntegrityError, InterfaceError\nfrom sqlalchemy.ext.asyncio import create_async_engine\nfrom sqlalchemy.ext.asyncio.engine import AsyncEngine\nfrom sqlalchemy.sql.ddl import DropTable\nfrom sqlalchemy.sql.elements import BindParameter\n\nfrom ..abc import AsyncDataStore, AsyncEventBroker, EventSource, Job, Schedule\nfrom ..enums import ConflictPolicy\nfrom ..eventbrokers.async_local import LocalAsyncEventBroker\nfrom ..events import (\n DataStoreEvent, JobAcquired, JobAdded, JobDeserializationFailed, ScheduleAdded,\n ScheduleDeserializationFailed, ScheduleRemoved, ScheduleUpdated, TaskAdded, TaskRemoved,\n TaskUpdated)\nfrom ..exceptions import ConflictingIdError, SerializationError, TaskLookupError\nfrom ..marshalling import callable_to_ref\nfrom ..structures import JobResult, Task\nfrom ..util import reentrant\nfrom .sqlalchemy import _BaseSQLAlchemyDataStore\n\n\n@reentrant\n@attrs.define(eq=False)\nclass AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):\n engine: AsyncEngine\n\n _events: AsyncEventBroker = attrs.field(factory=LocalAsyncEventBroker)\n _retrying: tenacity.AsyncRetrying = attrs.field(init=False)\n\n @classmethod\n def from_url(cls, url: str | URL, **options) -> AsyncSQLAlchemyDataStore:\n engine = create_async_engine(url, future=True)\n return cls(engine, **options)\n\n def __attrs_post_init__(self) -> None:\n super().__attrs_post_init__()\n\n # Construct the Tenacity retry controller\n # OSError is raised by asyncpg if it can't connect\n self._retrying = tenacity.AsyncRetrying(\n stop=self.retry_settings.stop, wait=self.retry_settings.wait,\n retry=tenacity.retry_if_exception_type((InterfaceError, OSError)),\n after=self._after_attempt, sleep=anyio.sleep, 
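The scraper record ending above deduplicates each new store by scanning all of `listOBJ`, which is quadratic in the number of stores. A sketch of the usual constant-time alternative, keyed on the same four fields the record compares; the `seen`/`add_store` names are illustrative:

```python
seen = set()
stores = []

def add_store(obj):
    # The same identity the record checks, packed into a hashable tuple.
    key = (obj.Title, obj.Address, obj.Latitude, obj.Longitude)
    if key not in seen:        # O(1) membership test instead of a list scan
        seen.add(key)
        stores.append(obj)
```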
reraise=True)\n\n async def __aenter__(self):\n asynclib = sniffio.current_async_library() or '(unknown)'\n if asynclib != 'asyncio':\n raise RuntimeError(f'This data store requires asyncio; currently running: {asynclib}')\n\n # Verify that the schema is in place\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n if self.start_from_scratch:\n for table in self._metadata.sorted_tables:\n await conn.execute(DropTable(table, if_exists=True))\n\n await conn.run_sync(self._metadata.create_all)\n query = select(self.t_metadata.c.schema_version)\n result = await conn.execute(query)\n version = result.scalar()\n if version is None:\n await conn.execute(self.t_metadata.insert(values={'schema_version': 1}))\n elif version > 1:\n raise RuntimeError(\n f'Unexpected schema version ({version}); '\n f'only version 1 is supported by this version of APScheduler')\n\n await self._events.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._events.__aexit__(exc_type, exc_val, exc_tb)\n\n @property\n def events(self) -> EventSource:\n return self._events\n\n async def _deserialize_schedules(self, result: Result) -> list[Schedule]:\n schedules: list[Schedule] = []\n for row in result:\n try:\n schedules.append(Schedule.unmarshal(self.serializer, row._asdict()))\n except SerializationError as exc:\n await self._events.publish(\n ScheduleDeserializationFailed(schedule_id=row['id'], exception=exc))\n\n return schedules\n\n async def _deserialize_jobs(self, result: Result) -> list[Job]:\n jobs: list[Job] = []\n for row in result:\n try:\n jobs.append(Job.unmarshal(self.serializer, row._asdict()))\n except SerializationError as exc:\n await self._events.publish(\n JobDeserializationFailed(job_id=row['id'], exception=exc))\n\n return jobs\n\n async def add_task(self, task: Task) -> None:\n insert = self.t_tasks.insert().\\\n values(id=task.id, func=callable_to_ref(task.func),\n max_running_jobs=task.max_running_jobs,\n misfire_grace_time=task.misfire_grace_time)\n try:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n await conn.execute(insert)\n except IntegrityError:\n update = self.t_tasks.update().\\\n values(func=callable_to_ref(task.func), max_running_jobs=task.max_running_jobs,\n misfire_grace_time=task.misfire_grace_time).\\\n where(self.t_tasks.c.id == task.id)\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n await conn.execute(update)\n\n await self._events.publish(TaskUpdated(task_id=task.id))\n else:\n await self._events.publish(TaskAdded(task_id=task.id))\n\n async def remove_task(self, task_id: str) -> None:\n delete = self.t_tasks.delete().where(self.t_tasks.c.id == task_id)\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n result = await conn.execute(delete)\n if result.rowcount == 0:\n raise TaskLookupError(task_id)\n else:\n await self._events.publish(TaskRemoved(task_id=task_id))\n\n async def get_task(self, task_id: str) -> Task:\n query = select([self.t_tasks.c.id, self.t_tasks.c.func, self.t_tasks.c.max_running_jobs,\n self.t_tasks.c.state, self.t_tasks.c.misfire_grace_time]).\\\n where(self.t_tasks.c.id == task_id)\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n result = await conn.execute(query)\n row = result.one()\n\n if row:\n return Task.unmarshal(self.serializer, row._asdict())\n else:\n raise 
TaskLookupError\n\n async def get_tasks(self) -> list[Task]:\n query = select([self.t_tasks.c.id, self.t_tasks.c.func, self.t_tasks.c.max_running_jobs,\n self.t_tasks.c.state, self.t_tasks.c.misfire_grace_time]).\\\n order_by(self.t_tasks.c.id)\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n result = await conn.execute(query)\n tasks = [Task.unmarshal(self.serializer, row._asdict()) for row in result]\n return tasks\n\n async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:\n event: DataStoreEvent\n values = schedule.marshal(self.serializer)\n insert = self.t_schedules.insert().values(**values)\n try:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n await conn.execute(insert)\n except IntegrityError:\n if conflict_policy is ConflictPolicy.exception:\n raise ConflictingIdError(schedule.id) from None\n elif conflict_policy is ConflictPolicy.replace:\n del values['id']\n update = self.t_schedules.update().\\\n where(self.t_schedules.c.id == schedule.id).\\\n values(**values)\n async for attempt in self._retrying:\n async with attempt, self.engine.begin() as conn:\n await conn.execute(update)\n\n event = ScheduleUpdated(schedule_id=schedule.id,\n next_fire_time=schedule.next_fire_time)\n await self._events.publish(event)\n else:\n event = ScheduleAdded(schedule_id=schedule.id,\n next_fire_time=schedule.next_fire_time)\n await self._events.publish(event)\n\n async def remove_schedules(self, ids: Iterable[str]) -> None:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n delete = self.t_schedules.delete().where(self.t_schedules.c.id.in_(ids))\n if self._supports_update_returning:\n delete = delete.returning(self.t_schedules.c.id)\n removed_ids: Iterable[str] = [row[0] for row in await conn.execute(delete)]\n else:\n # TODO: actually check which rows were deleted?\n await conn.execute(delete)\n removed_ids = ids\n\n for schedule_id in removed_ids:\n await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))\n\n async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:\n query = self.t_schedules.select().order_by(self.t_schedules.c.id)\n if ids:\n query = query.where(self.t_schedules.c.id.in_(ids))\n\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n result = await conn.execute(query)\n return await self._deserialize_schedules(result)\n\n async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n now = datetime.now(timezone.utc)\n acquired_until = now + timedelta(seconds=self.lock_expiration_delay)\n schedules_cte = select(self.t_schedules.c.id).\\\n where(and_(self.t_schedules.c.next_fire_time.isnot(None),\n self.t_schedules.c.next_fire_time <= now,\n or_(self.t_schedules.c.acquired_until.is_(None),\n self.t_schedules.c.acquired_until < now))).\\\n order_by(self.t_schedules.c.next_fire_time).\\\n limit(limit).with_for_update(skip_locked=True).cte()\n subselect = select([schedules_cte.c.id])\n update = self.t_schedules.update().\\\n where(self.t_schedules.c.id.in_(subselect)).\\\n values(acquired_by=scheduler_id, acquired_until=acquired_until)\n if self._supports_update_returning:\n update = update.returning(*self.t_schedules.columns)\n result = await conn.execute(update)\n else:\n await conn.execute(update)\n query = 
self.t_schedules.select().\\\n                            where(and_(self.t_schedules.c.acquired_by == scheduler_id))\n                        result = await conn.execute(query)\n\n                    schedules = await self._deserialize_schedules(result)\n\n        return schedules\n\n    async def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:\n        async for attempt in self._retrying:\n            with attempt:\n                async with self.engine.begin() as conn:\n                    update_events: list[ScheduleUpdated] = []\n                    finished_schedule_ids: list[str] = []\n                    update_args: list[dict[str, Any]] = []\n                    for schedule in schedules:\n                        if schedule.next_fire_time is not None:\n                            try:\n                                serialized_trigger = self.serializer.serialize(schedule.trigger)\n                            except SerializationError:\n                                self._logger.exception(\n                                    'Error serializing trigger for schedule %r – '\n                                    'removing from data store', schedule.id)\n                                finished_schedule_ids.append(schedule.id)\n                                continue\n\n                            update_args.append({\n                                'p_id': schedule.id,\n                                'p_trigger': serialized_trigger,\n                                'p_next_fire_time': schedule.next_fire_time\n                            })\n                        else:\n                            finished_schedule_ids.append(schedule.id)\n\n                    # Update schedules that have a next fire time\n                    if update_args:\n                        p_id: BindParameter = bindparam('p_id')\n                        p_trigger: BindParameter = bindparam('p_trigger')\n                        p_next_fire_time: BindParameter = bindparam('p_next_fire_time')\n                        update = self.t_schedules.update().\\\n                            where(and_(self.t_schedules.c.id == p_id,\n                                       self.t_schedules.c.acquired_by == scheduler_id)).\\\n                            values(trigger=p_trigger, next_fire_time=p_next_fire_time,\n                                   acquired_by=None, acquired_until=None)\n                        next_fire_times = {arg['p_id']: arg['p_next_fire_time']\n                                           for arg in update_args}\n                        if self._supports_update_returning:\n                            update = update.returning(self.t_schedules.c.id)\n                            updated_ids = [\n                                row[0] for row in await conn.execute(update, update_args)]\n                        else:\n                            # TODO: actually check which rows were updated?\n                            await conn.execute(update, update_args)\n                            updated_ids = list(next_fire_times)\n\n                        for schedule_id in updated_ids:\n                            event = ScheduleUpdated(schedule_id=schedule_id,\n                                                    next_fire_time=next_fire_times[schedule_id])\n                            update_events.append(event)\n\n                    # Remove schedules that have no next fire time or failed to serialize\n                    if finished_schedule_ids:\n                        delete = self.t_schedules.delete().\\\n                            where(self.t_schedules.c.id.in_(finished_schedule_ids))\n                        await conn.execute(delete)\n\n        for event in update_events:\n            await self._events.publish(event)\n\n        for schedule_id in finished_schedule_ids:\n            await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))\n\n    async def get_next_schedule_run_time(self) -> datetime | None:\n        statement = select(self.t_schedules.c.next_fire_time).\\\n            where(self.t_schedules.c.next_fire_time.isnot(None)).\\\n            order_by(self.t_schedules.c.next_fire_time).\\\n            limit(1)\n        async for attempt in self._retrying:\n            with attempt:\n                async with self.engine.begin() as conn:\n                    result = await conn.execute(statement)\n                    return result.scalar()\n\n    async def add_job(self, job: Job) -> None:\n        marshalled = job.marshal(self.serializer)\n        insert = self.t_jobs.insert().values(**marshalled)\n        async for attempt in self._retrying:\n            with attempt:\n                async with self.engine.begin() as conn:\n                    await conn.execute(insert)\n\n        event = JobAdded(job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,\n                         tags=job.tags)\n        await self._events.publish(event)\n\n    async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:\n        query = self.t_jobs.select().order_by(self.t_jobs.c.id)\n        if ids:\n            job_ids = [job_id for job_id in ids]\n            query = query.where(self.t_jobs.c.id.in_(job_ids))\n\n        async for 
attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n result = await conn.execute(query)\n return await self._deserialize_jobs(result)\n\n async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n now = datetime.now(timezone.utc)\n acquired_until = now + timedelta(seconds=self.lock_expiration_delay)\n query = self.t_jobs.select().\\\n join(self.t_tasks, self.t_tasks.c.id == self.t_jobs.c.task_id).\\\n where(or_(self.t_jobs.c.acquired_until.is_(None),\n self.t_jobs.c.acquired_until < now)).\\\n order_by(self.t_jobs.c.created_at).\\\n with_for_update(skip_locked=True).\\\n limit(limit)\n\n result = await conn.execute(query)\n if not result:\n return []\n\n # Mark the jobs as acquired by this worker\n jobs = await self._deserialize_jobs(result)\n task_ids: set[str] = {job.task_id for job in jobs}\n\n # Retrieve the limits\n query = select([\n self.t_tasks.c.id,\n self.t_tasks.c.max_running_jobs - self.t_tasks.c.running_jobs]).\\\n where(self.t_tasks.c.max_running_jobs.isnot(None),\n self.t_tasks.c.id.in_(task_ids))\n result = await conn.execute(query)\n job_slots_left: dict[str, int] = dict(result.fetchall())\n\n # Filter out jobs that don't have free slots\n acquired_jobs: list[Job] = []\n increments: dict[str, int] = defaultdict(lambda: 0)\n for job in jobs:\n # Don't acquire the job if there are no free slots left\n slots_left = job_slots_left.get(job.task_id)\n if slots_left == 0:\n continue\n elif slots_left is not None:\n job_slots_left[job.task_id] -= 1\n\n acquired_jobs.append(job)\n increments[job.task_id] += 1\n\n if acquired_jobs:\n # Mark the acquired jobs as acquired by this worker\n acquired_job_ids = [job.id for job in acquired_jobs]\n update = self.t_jobs.update().\\\n values(acquired_by=worker_id, acquired_until=acquired_until).\\\n where(self.t_jobs.c.id.in_(acquired_job_ids))\n await conn.execute(update)\n\n # Increment the running job counters on each task\n p_id: BindParameter = bindparam('p_id')\n p_increment: BindParameter = bindparam('p_increment')\n params = [{'p_id': task_id, 'p_increment': increment}\n for task_id, increment in increments.items()]\n update = self.t_tasks.update().\\\n values(running_jobs=self.t_tasks.c.running_jobs + p_increment).\\\n where(self.t_tasks.c.id == p_id)\n await conn.execute(update, params)\n\n # Publish the appropriate events\n for job in acquired_jobs:\n await self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))\n\n return acquired_jobs\n\n async def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n # Insert the job result\n marshalled = result.marshal(self.serializer)\n insert = self.t_job_results.insert().values(**marshalled)\n await conn.execute(insert)\n\n # Decrement the running jobs counter\n update = self.t_tasks.update().\\\n values(running_jobs=self.t_tasks.c.running_jobs - 1).\\\n where(self.t_tasks.c.id == task_id)\n await conn.execute(update)\n\n # Delete the job\n delete = self.t_jobs.delete().where(self.t_jobs.c.id == result.job_id)\n await conn.execute(delete)\n\n async def get_job_result(self, job_id: UUID) -> JobResult | None:\n async for attempt in self._retrying:\n with attempt:\n async with self.engine.begin() as conn:\n # Retrieve the result\n query = self.t_job_results.select().\\\n where(self.t_job_results.c.job_id 
== job_id)\n row = (await conn.execute(query)).fetchone()\n\n # Delete the result\n delete = self.t_job_results.delete().\\\n where(self.t_job_results.c.job_id == job_id)\n await conn.execute(delete)\n\n return JobResult.unmarshal(self.serializer, row._asdict()) if row else None\n", "sub_path": "src/apscheduler/datastores/async_sqlalchemy.py", "file_name": "async_sqlalchemy.py", "file_ext": "py", "file_size_in_byte": 22208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy._BaseSQLAlchemyDataStore", "line_number": 36, "usage_type": "name"}, {"api_name": "abc.AsyncDataStore", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.engine.AsyncEngine", "line_number": 37, "usage_type": "name"}, {"api_name": "abc.AsyncEventBroker", "line_number": 39, "usage_type": "name"}, {"api_name": "attrs.field", "line_number": 39, "usage_type": "call"}, {"api_name": "eventbrokers.async_local.LocalAsyncEventBroker", "line_number": 39, "usage_type": "name"}, {"api_name": "tenacity.AsyncRetrying", "line_number": 40, "usage_type": "attribute"}, {"api_name": "attrs.field", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.URL", "line_number": 43, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.create_async_engine", "line_number": 44, "usage_type": "call"}, {"api_name": "tenacity.AsyncRetrying", "line_number": 52, "usage_type": "call"}, {"api_name": "tenacity.retry_if_exception_type", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.InterfaceError", "line_number": 54, "usage_type": "name"}, {"api_name": "anyio.sleep", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sniffio.current_async_library", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.ddl.DropTable", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 71, "usage_type": "call"}, {"api_name": "abc.EventSource", "line_number": 88, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.Result", "line_number": 91, "usage_type": "name"}, {"api_name": "abc.Schedule", "line_number": 92, "usage_type": "name"}, {"api_name": "abc.Schedule.unmarshal", "line_number": 95, "usage_type": "call"}, {"api_name": "abc.Schedule", "line_number": 95, "usage_type": "name"}, {"api_name": "exceptions.SerializationError", "line_number": 96, "usage_type": "name"}, {"api_name": "events.ScheduleDeserializationFailed", "line_number": 98, "usage_type": "call"}, {"api_name": "abc.Schedule", "line_number": 91, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.Result", "line_number": 102, "usage_type": "name"}, {"api_name": "abc.Job", "line_number": 103, "usage_type": "name"}, {"api_name": "abc.Job.unmarshal", "line_number": 106, "usage_type": "call"}, {"api_name": "abc.Job", "line_number": 106, "usage_type": "name"}, {"api_name": "exceptions.SerializationError", "line_number": 107, "usage_type": "name"}, {"api_name": "events.JobDeserializationFailed", "line_number": 109, "usage_type": "call"}, {"api_name": "abc.Job", "line_number": 102, "usage_type": "name"}, {"api_name": "structures.Task", "line_number": 113, "usage_type": "name"}, {"api_name": "marshalling.callable_to_ref", "line_number": 115, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 123, "usage_type": "name"}, {"api_name": "marshalling.callable_to_ref", "line_number": 125, "usage_type": "call"}, {"api_name": "events.TaskUpdated", "line_number": 133, 
"usage_type": "call"}, {"api_name": "events.TaskAdded", "line_number": 135, "usage_type": "call"}, {"api_name": "exceptions.TaskLookupError", "line_number": 144, "usage_type": "call"}, {"api_name": "events.TaskRemoved", "line_number": 146, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 149, "usage_type": "call"}, {"api_name": "structures.Task.unmarshal", "line_number": 159, "usage_type": "call"}, {"api_name": "structures.Task", "line_number": 159, "usage_type": "name"}, {"api_name": "exceptions.TaskLookupError", "line_number": 161, "usage_type": "name"}, {"api_name": "structures.Task", "line_number": 148, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 164, "usage_type": "call"}, {"api_name": "structures.Task.unmarshal", "line_number": 171, "usage_type": "call"}, {"api_name": "structures.Task", "line_number": 171, "usage_type": "name"}, {"api_name": "structures.Task", "line_number": 163, "usage_type": "name"}, {"api_name": "abc.Schedule", "line_number": 174, "usage_type": "name"}, {"api_name": "enums.ConflictPolicy", "line_number": 174, "usage_type": "name"}, {"api_name": "events.DataStoreEvent", "line_number": 175, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 183, "usage_type": "name"}, {"api_name": "enums.ConflictPolicy.exception", "line_number": 184, "usage_type": "attribute"}, {"api_name": "enums.ConflictPolicy", "line_number": 184, "usage_type": "name"}, {"api_name": "exceptions.ConflictingIdError", "line_number": 185, "usage_type": "call"}, {"api_name": "enums.ConflictPolicy.replace", "line_number": 186, "usage_type": "attribute"}, {"api_name": "enums.ConflictPolicy", "line_number": 186, "usage_type": "name"}, {"api_name": "events.ScheduleUpdated", "line_number": 195, "usage_type": "call"}, {"api_name": "events.ScheduleAdded", "line_number": 199, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 203, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 210, "usage_type": "name"}, {"api_name": "events.ScheduleRemoved", "line_number": 217, "usage_type": "call"}, {"api_name": "abc.Schedule", "line_number": 219, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 234, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 234, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 235, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 236, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 237, "usage_type": "call"}, {"api_name": "sqlalchemy.or_", "line_number": 239, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 243, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 253, "usage_type": "call"}, {"api_name": "abc.Schedule", "line_number": 230, "usage_type": "name"}, {"api_name": "abc.Schedule", "line_number": 260, "usage_type": "name"}, {"api_name": "events.ScheduleUpdated", "line_number": 264, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 266, "usage_type": "name"}, {"api_name": "exceptions.SerializationError", "line_number": 271, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.elements.BindParameter", "line_number": 288, "usage_type": "name"}, {"api_name": "sqlalchemy.bindparam", "line_number": 288, "usage_type": "call"}, {"api_name": 
"sqlalchemy.sql.elements.BindParameter", "line_number": 289, "usage_type": "name"}, {"api_name": "sqlalchemy.bindparam", "line_number": 289, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.elements.BindParameter", "line_number": 290, "usage_type": "name"}, {"api_name": "sqlalchemy.bindparam", "line_number": 290, "usage_type": "call"}, {"api_name": "sqlalchemy.and_", "line_number": 292, "usage_type": "call"}, {"api_name": "events.ScheduleUpdated", "line_number": 308, "usage_type": "call"}, {"api_name": "events.ScheduleRemoved", "line_number": 322, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 325, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 324, "usage_type": "name"}, {"api_name": "abc.Job", "line_number": 335, "usage_type": "name"}, {"api_name": "events.JobAdded", "line_number": 343, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 347, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 347, "usage_type": "name"}, {"api_name": "abc.Job", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 363, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 363, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 363, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 363, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 364, "usage_type": "call"}, {"api_name": "sqlalchemy.or_", "line_number": 367, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 382, "usage_type": "call"}, {"api_name": "abc.Job", "line_number": 391, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 392, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.elements.BindParameter", "line_number": 413, "usage_type": "name"}, {"api_name": "sqlalchemy.bindparam", "line_number": 413, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.elements.BindParameter", "line_number": 414, "usage_type": "name"}, {"api_name": "sqlalchemy.bindparam", "line_number": 414, "usage_type": "call"}, {"api_name": "events.JobAcquired", "line_number": 424, "usage_type": "call"}, {"api_name": "abc.Job", "line_number": 359, "usage_type": "name"}, {"api_name": "structures.JobResult", "line_number": 428, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 447, "usage_type": "name"}, {"api_name": "structures.JobResult.unmarshal", "line_number": 461, "usage_type": "call"}, {"api_name": "structures.JobResult", "line_number": 461, "usage_type": "name"}, {"api_name": "structures.JobResult", "line_number": 447, "usage_type": "name"}, {"api_name": "util.reentrant", "line_number": 34, "usage_type": "name"}, {"api_name": "attrs.define", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "189637689", "text": "from Tkinter import *\r\nimport ttk\r\nimport PIL.Image\r\nimport PIL.ImageTk\r\n\r\n''' Initiate Tkinter. 
'''\r\n\r\nroot = Tk()\r\nroot.title('Architecture')\r\nroot.state('zoomed')\r\nroot.resizable(width = True, height = True)\r\nroot.columnconfigure(0, weight = 1)\r\nroot.rowconfigure(0, weight = 1)\r\n\r\n''' Initiate Notebookframe.'''\r\n\r\nNotebookFrame = LabelFrame(root, text = 'Architecture', labelanchor = 'n')\r\nNotebookFrame.columnconfigure(0, weight = 1)\r\nNotebookFrame.rowconfigure(0, weight = 1)\r\nNotebookFrame.grid(sticky = 'wsne')\r\n\r\n''' Initiate Notebook.'''\r\n\r\nNotebook = ttk.Notebook(NotebookFrame)\r\nInput = Frame(Notebook)\r\nDiagramI = Frame(Notebook)\r\nDiagramII = Frame(Notebook)\r\nCheckDesignI = Frame(Notebook)\r\nCheckDesignII = Frame(Notebook)\r\n\r\nNotebook.add(Input, text = 'Input')\r\nNotebook.add(DiagramI, text = 'DiagramI')\r\nNotebook.add(DiagramII, text = 'DiagramII')\r\nNotebook.add(CheckDesignI, text = 'CheckDesignI')\r\nNotebook.add(CheckDesignII, text = 'CheckDesignII')\r\n\r\nNotebook.grid(sticky = 'wsne')\r\n\r\n''' Add parts in Input. '''\r\ndef SetEntry(master, width = 18, hightlightthickness = 2, borderwidth = 4):\r\n\treturn Entry(master, width = width, highlightthickness = hightlightthickness, borderwidth = borderwidth)\r\n\r\ndef ImageTran(ImgFile):\r\n\timg = PIL.Image.open(ImgFile)\r\n\tw, h = img.size\r\n\timg.thumbnail((w*0.95, h*0.95))\r\n\tphoto = PIL.ImageTk.PhotoImage(img)\r\n\treturn photo\r\n\r\n#Inclination of X, Y\r\nInc_Frame = Frame(Input)\r\nInc_Frame.grid(row = 0, column = 0)\r\n\r\nInc_X = ImageTran('Input\\InclinationOfHingeAxisAroundX.png')\r\nLabel(Inc_Frame, image = Inc_X).grid(row = 0, column = 0)\r\n\r\nlabelentryIncX = Label(Inc_Frame, text = 'Inclination of hinge axis around X-axis [o]')\r\nlabelentryIncX.grid(row = 1, column = 0)\r\nentryIncX = SetEntry(Inc_Frame)\r\nentryIncX.grid(row = 2, column = 0)\r\n\r\n\r\nInc_Y = ImageTran('Input\\InclinationOfHingeAxisAroundY.png')\r\nLabel(Inc_Frame, image = Inc_Y).grid(row = 0, column = 1)\r\n\r\nlabelentryIncY = Label(Inc_Frame, text = 'Inclination of hinge axis around Y-axis [o]')\r\nlabelentryIncY.grid(row = 1, column = 1)\r\nentryIncY = SetEntry(Inc_Frame)\r\nentryIncY.grid(row = 2, column = 1)\r\n\r\n#Vehicle Side\r\nVeh_Side_frame = Frame(Input)\r\nVeh_Side_frame.grid(row = 0, column = 1)\r\n\r\nVeh_Side = ImageTran('Input\\VehicleSideInclination.png')\r\nLabel(Veh_Side_frame, image = Veh_Side).grid(row = 0, column = 0)\r\n\r\nlabelentryVeh_Side_Wc = Label(Veh_Side_frame, text = ' Wc [mm]')\r\nlabelentryVeh_Side_Wc.grid(row = 1, column = 0, sticky = 'w')\r\nlabelentryVeh_Side_Hcs = Label(Veh_Side_frame, text = 'Hcs [mm] ')\r\nlabelentryVeh_Side_Hcs.grid(row = 1, column = 0, sticky = 'e')\r\n\r\nentry_Wc = SetEntry(Veh_Side_frame, width = 15)\r\nentry_Wc.grid(row = 2, column = 0, sticky = 'w')\r\nentry_Hcs = SetEntry(Veh_Side_frame, width = 15)\r\nentry_Hcs.grid(row = 2, column = 0, sticky = 'e')\r\n\r\n\r\n#Vehcle Hill UP, DOWN\r\nHill_frame = Frame(Input)\r\nHill_frame.grid(row = 0, column = 2)\r\n\r\nHill_Up = ImageTran('Input\\VehicleUpInclination.png')\r\nLabel(Hill_frame, image = Hill_Up).grid(row = 0, column = 0)\r\n\r\nHill_Down = ImageTran('Input\\VehicleDownInclination.png')\r\nLabel(Hill_frame, image = Hill_Down).grid(row = 0, column = 1)\r\n\r\nlabelentryHillup = Label(Hill_frame, text = ' Hill up inclination[%]')\r\nlabelentryHillup.grid(row = 1, column = 0, sticky = 'w')\r\nentryHillup = SetEntry(Hill_frame)\r\nentryHillup.grid(row = 2, column = 0, sticky = 'w')\r\n\r\nlabelentryHilldown = Label(Hill_frame, text = 'Hill down 
inclination[%]')\r\nlabelentryHilldown.grid(row = 1, column = 1, sticky = 'w')\r\nentryHilldown = SetEntry(Hill_frame)\r\nentryHilldown.grid(row = 2, column = 1, sticky = 'w')\r\n\r\nAddHillUp = Button(Hill_frame, text = 'ADD', width = 12)\r\nCOUNTup = 1\r\ndef changeUpText():\r\n\tglobal AddHillUp\r\n\tglobal COUNTup\r\n\tCOUNTup *= -1\r\n\ttextdic = {1:'ADD', -1:'ADDED'}\r\n\tAddHillUp['text'] = textdic[COUNTup]\r\nAddHillUp['command'] = changeUpText\r\nAddHillUp.grid(row = 2, column = 0, sticky = 'e')\r\n\r\nAddHillDown = Button(Hill_frame, text = 'ADD', width = 12)\r\nCOUNTdown = 1\r\ndef changeDownText():\r\n\tglobal AddHillDown\r\n\tglobal COUNTdown\r\n\tCOUNTdown *= -1\r\n\ttextdic = {1:'ADD', -1:'ADDED'}\r\n\tAddHillDown['text'] = textdic[COUNTdown]\r\nAddHillDown['command'] = changeDownText\r\nAddHillDown.grid(row = 2, column = 1, sticky = 'e')\r\n\r\n\r\n\r\n\r\n#Door Check Parameter\r\nDCparam = ImageTran('Input\\DoorcheckHingeParameter.png')\r\nLabel(Input, image = DCparam).grid(row = 1, column = 0, columnspan = 1)\r\n\r\n\r\n#Door Parameter\r\nDoorParam_frame = Frame(Input)\r\nDoorParam_frame.grid(row = 1, column = 1, columnspan = 2)\r\n\r\nGrav_Dis = ImageTran('Input\\DistanceCenterOfGravityOfDoor.png')\r\nLabel(DoorParam_frame, image = Grav_Dis).grid(row = 0, column = 0)\r\n\r\nGrip_Pos = ImageTran('Input\\DoorGripPosition.png')\r\nLabel(DoorParam_frame, image = Grip_Pos).grid(row = 0, column = 1)\r\n\r\nAngle_Set = ImageTran('Input\\OpenAngle.png')\r\nLabel(DoorParam_frame, image = Angle_Set).grid(row = 0, column = 2)\r\n\r\n\r\n#\r\n\r\n\r\n\r\n\r\n\r\nroot.mainloop()", "sub_path": "Frame.py", "file_name": "Frame.py", "file_ext": "py", "file_size_in_byte": 4920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "ttk.Notebook", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 44, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "120956525", "text": "import argparse\nimport sys\nimport time\nimport os\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom adaptive_softmax.adaptive_softmax import AdaptiveLoss\nimport adaptive_softmax.model as model\nfrom utils.corpus import Corpus\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data', type=str,\n help='location of the data corpus')\nparser.add_argument('--dic', type=str,\n help='path to dictionary pickle')\nparser.add_argument('--old', type=str, default=None,\n help='old model to keep training')\nparser.add_argument('--model', type=str, default='GRU',\n help='type of recurrent net (RNN_TANH, RNN_RELU, GRU)')\nparser.add_argument('--emsize', type=int, default=1024,\n help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=1024,\n help='number of hidden units per layer')\nparser.add_argument('--nlayers', type=int, default=1,\n help='number of layers')\nparser.add_argument('--cutoffs', nargs='+', type=int,\n help='cutoffs for buckets in adaptive softmax')\nparser.add_argument('--lr', type=float, default=1,\n help='initial learning 
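The GUI record ending above keeps every `PhotoImage` in a module-level name, and that is load-bearing: Tkinter does not hold a strong reference to a Label's image, so a `PhotoImage` that goes out of scope gets garbage-collected and the widget goes blank. A Python 3 sketch of the safe pattern (the record itself is Python 2, and `diagram.png` is a placeholder file name):

```python
import tkinter as tk
from PIL import Image, ImageTk

root = tk.Tk()

img = Image.open('diagram.png')  # placeholder path
img.thumbnail((int(img.width * 0.95), int(img.height * 0.95)))
photo = ImageTk.PhotoImage(img)

label = tk.Label(root, image=photo)
label.image = photo  # pin the reference to the widget so it stays alive
label.grid(row=0, column=0)

root.mainloop()
```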
rate')\nparser.add_argument('--ar', type=float, default=0.9,\n                    help='learning rate annealing rate')\nparser.add_argument('--clip', type=float, default=0.25,\n                    help='gradient clipping')\nparser.add_argument('--epochs', type=int, default=10,\n                    help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=64, metavar='N',\n                    help='batch size')\nparser.add_argument('--eval_batch_size', type=int, default=1024, metavar='N',\n                    help='batch size')\nparser.add_argument('--bptt', type=int, default=35,\n                    help='sequence length')\nparser.add_argument('--dropout', type=float, default=0.2,\n                    help='dropout applied to layers (0 = no dropout)')\nparser.add_argument('--tied', action='store_true',\n                    help='tie the word embedding and softmax weights')\nparser.add_argument('--seed', type=int, default=1111,\n                    help='random seed')\nparser.add_argument('--log_interval', type=int, default=100, metavar='N',\n                    help='report interval')\nparser.add_argument('--save', type=str, default='model.pt',\n                    help='path to save the final model')\n# Hardware\nparser.add_argument('--cuda', action='store_true',\n                    help='use CUDA')\nparser.add_argument('--gpu', type=int, default=0,\n                    help='gpu to use')\nargs = parser.parse_args()\n\n# Create the save directory if it does not exist\nif not os.path.exists(os.path.dirname(os.path.realpath(args.save))):\n    os.makedirs(os.path.dirname(os.path.realpath(args.save)))\n\n# default `log_dir` is \"runs\" - we'll be more specific here\ntb_writer = SummaryWriter(os.path.join('runs/',\n                                       os.path.basename(args.save)))\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available() and args.cuda:\n    torch.cuda.manual_seed(args.seed)\n    device = torch.device(\"cuda\", args.gpu)\nelse:\n    if torch.cuda.is_available():\n        print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n    # fall back to the CPU when CUDA is absent or not requested\n    device = torch.device(\"cpu\")\n\ncorpus = Corpus(args.data, args.dic)\nntokens = len(corpus.dictionary.idx2word)\ncutoffs = args.cutoffs + [ntokens]\nprint('ntokens:', ntokens)\nprint('cutoffs:', cutoffs)\n\nif args.old is None:\n    model = model.RNNModel(args.model, ntokens, args.emsize,\n                           args.nhid, args.nlayers, cutoffs, args.dropout, args.tied)\nelse:\n    with open(args.old, 'rb') as model_file:\n        model = torch.load(model_file)\nif args.cuda:\n    model = model.to(device)\n\ncriterion = AdaptiveLoss(cutoffs)\n# optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr,\n#                              weight_decay=1e-3, t0=10)\n# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')\n\n###############################################################################\n# Training code\n###############################################################################\n\n# Loop over epochs.\nglobal lr, best_val_loss, global_step\nlr = args.lr\nbest_val_loss = None\n\n\ndef get_lr(optimizer):\n    for param_group in optimizer.param_groups:\n        return param_group['lr']\n\n\ndef repackage_hidden(h):\n    \"\"\"Detaches hidden states from their history\"\"\"\n    if isinstance(h, torch.Tensor):\n        return h.detach()\n    else:\n        return tuple(repackage_hidden(v) for v in h)\n\n\ndef evaluate(split):\n    # Turn on evaluation mode which disables dropout.\n    model.eval()\n    total_loss, nbatches = 0, 0\n    ntokens = len(corpus.dictionary.idx2word)\n    with torch.no_grad():\n        hidden = model.init_hidden(args.eval_batch_size)\n        for source, target in corpus.iter(split, args.eval_batch_size, args.bptt,\n                                          use_cuda=args.cuda, device=device):\n            model.softmax.set_target(target.data.view(-1))\n            output, hidden = model(source, 
hidden)\n total_loss += criterion(output, target.view(-1)).data.sum().item()\n hidden = repackage_hidden(hidden)\n nbatches += 1\n return total_loss / nbatches\n\n\ndef train():\n global lr, best_val_loss, global_step\n # Turn on training mode which enables dropout.\n model.train()\n start_time = time.time()\n total_loss, nbatches = 0, 0\n ntokens = len(corpus.dictionary.idx2word)\n hidden = model.init_hidden(args.batch_size)\n for b, batch in enumerate(corpus.iter('train', args.batch_size, args.bptt,\n use_cuda=args.cuda, device=device)):\n model.train()\n source, target = batch\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n hidden = repackage_hidden(hidden)\n model.zero_grad()\n model.softmax.set_target(target.view(-1))\n output, hidden = model(source, hidden)\n loss = criterion(output, target.view(-1))\n loss.backward()\n\n # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs.\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n # update the model parameters\n for p in model.parameters():\n if p.grad is not None:\n p.data.add_(-lr, p.grad.data)\n # optimizer.step()\n\n total_loss += loss.data.cpu()\n\n if b % args.log_interval == 0 and b > 0:\n cur_loss = total_loss.item() / args.log_interval\n elapsed = time.time() - start_time\n val_loss = evaluate('valid')\n # scheduler.step(val_loss)\n print('| epoch {:3d} | batch {:5d} | lr {:05.4e} | ms/batch {:3.2e} '\n '| loss {:5.4e} | ppl {:5.4e} '\n '| valid loss {:5.4e} | valid ppl {:5.4e}'\n .format(\n epoch, b, lr,\n elapsed * 1000 / args.log_interval,\n cur_loss, math.exp(cur_loss),\n val_loss, math.exp(val_loss)))\n # add log to tensorboard scalar\n tb_writer.add_scalar('cur_loss', cur_loss, global_step)\n tb_writer.add_scalar('val_loss', val_loss, global_step)\n global_step += 1\n\n # Save the model if the validation loss is the best we've seen so far.\n if not best_val_loss or val_loss < best_val_loss:\n with open(args.save, 'wb') as f:\n torch.save(model, f)\n best_val_loss = val_loss\n else:\n # Anneal the learning rate if no improvement has been seen in the validation dataset.\n pass\n lr *= args.ar\n # optimizer.lr = optimizer.lr * args.ar\n\n total_loss = 0\n start_time = time.time()\n\n\n# At any point you can hit Ctrl + C to break out of training early.\ntry:\n # reset global setp for the summary writer\n global_step = 0\n for epoch in range(1, args.epochs+1):\n epoch_start_time = time.time()\n train()\n val_loss = evaluate('valid')\n print('{:-^80}'.format(''))\n print('| end of epoch {:3d} | time: {:4.3e}s '\n '| valid loss {:5.4e} |valid ppl {:5.4e}'\n .format(epoch, (time.time() - epoch_start_time),\n val_loss, math.exp(val_loss)))\n print('{:-^80}'.format(''))\nexcept KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early')\n\n# Load the best saved model.\nwith open(args.save, 'rb') as f:\n model = torch.load(f)\n\n# Run on test data.\ntest_loss = evaluate('test')\nprint('=' * 89)\nprint('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(\n test_loss, math.exp(test_loss)))\nprint('=' * 89)\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 9114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.corpus.Corpus", "line_number": 82, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 89, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.RNNModel", "line_number": 89, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 93, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 95, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.to", "line_number": 95, "usage_type": "call"}, {"api_name": "adaptive_softmax.adaptive_softmax.AdaptiveLoss", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 119, "usage_type": "attribute"}, {"api_name": "adaptive_softmax.model.eval", "line_number": 127, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 130, "usage_type": "call"}, {"api_name": "adaptive_softmax.model.init_hidden", "line_number": 131, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 131, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.softmax.set_target", "line_number": 134, "usage_type": "call"}, {"api_name": "adaptive_softmax.model.softmax", "line_number": 134, "usage_type": "attribute"}, {"api_name": "adaptive_softmax.model", "line_number": 134, "usage_type": "name"}, {"api_name": "adaptive_softmax.model", "line_number": 135, "usage_type": "call"}, {"api_name": "adaptive_softmax.model.train", "line_number": 145, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 145, "usage_type": "name"}, {"api_name": "time.time", "line_number": 146, "usage_type": "call"}, {"api_name": "adaptive_softmax.model.init_hidden", "line_number": 149, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 149, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.train", "line_number": 152, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 152, "usage_type": "name"}, 
{"api_name": "adaptive_softmax.model.zero_grad", "line_number": 157, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 157, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.softmax.set_target", "line_number": 158, "usage_type": "call"}, {"api_name": "adaptive_softmax.model.softmax", "line_number": 158, "usage_type": "attribute"}, {"api_name": "adaptive_softmax.model", "line_number": 158, "usage_type": "name"}, {"api_name": "adaptive_softmax.model", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 164, "usage_type": "attribute"}, {"api_name": "adaptive_softmax.model.parameters", "line_number": 164, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 164, "usage_type": "name"}, {"api_name": "adaptive_softmax.model.parameters", "line_number": 166, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 166, "usage_type": "name"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 184, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 194, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 194, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 203, "usage_type": "call"}, {"api_name": "time.time", "line_number": 211, "usage_type": "call"}, {"api_name": "time.time", "line_number": 217, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 218, "usage_type": "call"}, {"api_name": "adaptive_softmax.model", "line_number": 226, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 226, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "13619129", "text": "#!/usr/bin/python\n\n\"\"\"Project Euler Solution 019\n\nCopyright (c) 2011 by Robert Vella - robert.r.h.vella@gmail.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and / or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport cProfile\nfrom datetime import date, timedelta\n\ndef get_answer():\n \"\"\"Question:\n \n You are given the following information, but you may prefer to do some \n research for yourself.\n\n * 1 Jan 1900 was a Monday.\n \n * Thirty days has September,\n April, June and November.\n All the rest have thirty-one,\n Saving February alone,\n Which has twenty-eight, rain or shine.\n And on leap years, twenty-nine.\n \n * A leap year occurs on any year evenly divisible by 4, but not on a \n century unless it is divisible by 400.\n\n How many Sundays fell on the first of the month during the twentieth \n century (1 Jan 1901 to 31 Dec 2000)?\n \"\"\"\n \n #Go through all the dates mentioned and count the number of sundays\n #which fall on the first of the month.\n current_date = date(2000, 12, 31)\n one_day = timedelta(1)\n \n total_sundays_on_first_day = 0 \n \n while current_date >= date(1901, 1, 1):\n if current_date.weekday() == 6 and current_date.strftime('%d') == '01':\n total_sundays_on_first_day += 1\n \n current_date -= one_day\n \n return total_sundays_on_first_day\n\nif __name__ == \"__main__\":\n cProfile.run(\"print(get_answer())\")\n", "sub_path": "answers/euler019.py", "file_name": "euler019.py", "file_ext": "py", "file_size_in_byte": 2411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.date", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 58, "usage_type": "call"}, {"api_name": "cProfile.run", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "487948936", "text": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n#from natsort import natsorted\ndef match(artist):\n return artist.__module__ == \"matplotlib.text\"\n\nos.chdir('/home/thaa191/Programing/Dumbbells/Disorder_data/')\n\nimbal1 = []\nerror1 = []\ndata = sio.loadmat('Length_150/Fill_0.mat')\nimbal1.append(data['imbalance'][0])\nerror1.append(data['error'][0])\ntime = np.array(data['time'][0])\n\nos.chdir('/home/thaa191/Programing/Dumbbells/Bent_Channel/')\n\nimbal2 = []\nerror2 = []\ndata = sio.loadmat('Length_50-50.mat')\nimbal2.append(data['imbalance'][0])\nerror2.append(data['error'][0])\n\nimbal3 = []\nerror3 = []\ndata = sio.loadmat('Length_100-50.mat')\nimbal3.append(data['imbalance'][0])\nerror3.append(data['error'][0])\n\nfig_imbal = plt.figure(figsize=(10, 8), dpi=80)\nax = plt.subplot(111)\nax.errorbar(time, imbal1[0], yerr=error1[0], fmt='o',ms=8)\nax.errorbar(time, imbal2[0], yerr=error2[0], fmt='x',ms=8)\nax.errorbar(time, imbal3[0], yerr=error3[0], fmt='v',ms=8)\nlegend_list=['Straight','50-50','100-50']\nax.legend(legend_list, fontsize = 5,loc=2,bbox_to_anchor=(0.85,1),borderaxespad=0.)\n#plot_title = \"Imbalances for Length \"+str(length)+\" (px)\"\n#plt.title(plot_title)\nax.set_xlabel('Expansion Time (ms)')\nax.set_ylabel('Imbalance')\nax.axhline(y=0, xmin=0, xmax=1,color = 'k')\nfont_size=24\nax.set_ylim(-0.3,1.05)\nax.set_xlim(0,255)\nfor textobj in fig_imbal.findobj(match=match):\n 
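The Euler 19 record ending above walks the century one day at a time. Since only the first of each month can ever count, checking those 1200 dates directly is equivalent and roughly thirty times faster:

```python
from datetime import date

total = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6   # weekday() 6 is Sunday
)
print(total)  # 171
```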
textobj.set_fontsize(font_size)\nplt.tight_layout()\n\n#fig_imbal.savefig(\"Imbalance_Plot_Length_\"+str(length)+\".pdf\")", "sub_path": "bent_Channel_Imbal_plot.py", "file_name": "bent_Channel_Imbal_plot.py", "file_ext": "py", "file_size_in_byte": 1525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 22, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "383180067", "text": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nimport time\nimport logging\nimport tarfile\nimport torch\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas.plotting import scatter_matrix\nimport multiprocessing\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom functools import partial\n\nfrom .tools import create_missing_folders, load, load_and_check\nfrom .plotting import draw_weighted_distributions, draw_unweighted_distributions, draw_ROC, resampled_discriminator_and_roc\nfrom sklearn.model_selection import train_test_split\nlogger = logging.getLogger(__name__)\n\n\nclass Loader():\n \"\"\"\n Loading of data.\n \"\"\"\n def __init__(self):\n super(Loader, self).__init__()\n\n def loading(\n self,\n folder=None,\n plot=False,\n var = 'QSFUP',\n do = 'dilepton',\n x0 = None,\n x1 = None,\n randomize = False,\n save = False,\n correlation = True,\n preprocessing = True,\n nentries = 0,\n path = '',\n ):\n \"\"\"\n Parameters\n ----------\n folder : str or None\n Path to the folder where the resulting samples should be saved (ndarrays in .npy format). Default value:\n None.\n plot : bool, optional\n make validation plots\n do : str\n Decide what samples to use. Can either be Sherpa Vs Madgraph ('sherpaVsMG5'), Renormalization scale up vs down ('mur') or qsf scale up vs down ('qsf') \n Default value: 'sherpaVsMG5'\n x0 : dataframe of none\n Either pass a dataframe as in notebook, or None to load sample according to do option. \n x1 : dataframe of none\n Either pass a dataframe as in notebook, or None to load sample according to do option. \n randomize : bool, optional\n Randomize training sample. Default value: \n False\n save : bool, optional\n Save training ans test samples. Default value:\n False\n Returns\n -------\n x : ndarray\n Observables with shape `(n_samples, n_observables)`. 
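The plotting record ending above resizes every piece of text at once by matching all Text artists with `Figure.findobj()`. The same trick in miniature; the data points here are made up:

```python
import matplotlib
matplotlib.use('Agg')  # headless backend, as in the record
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.errorbar([0, 50, 100], [0.9, 0.5, 0.1], yerr=[0.05, 0.04, 0.06], fmt='o')
ax.set_xlabel('Expansion Time (ms)')
ax.set_ylabel('Imbalance')

def is_text(artist):
    # Text artists (titles, axis labels, tick labels) live in matplotlib.text.
    return artist.__module__ == 'matplotlib.text'

for text in fig.findobj(match=is_text):
    text.set_fontsize(24)

fig.savefig('imbalance.png')
```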
The same information is saved as a file in the given\n            folder.\n        y : ndarray\n            Class label with shape `(n_samples, n_parameters)`. `y=0` (`1`) for events sampled from the numerator\n            (denominator) hypothesis. The same information is saved as a file in the given folder.\n        \"\"\"\n\n        create_missing_folders([folder+'/'+do+'/'+var])\n        create_missing_folders(['plots'])\n        # load samples\n        etaJ = [-2.8,-2.4,-2,-1.6,-1.2,-0.8,-0.4,0,0.4,0.8,1.2,1.6,2,2.4,2.8]\n        eventVars = ['Njets', 'MET']\n        jetVars = ['Jet_Pt', 'Jet_Mass']\n        lepVars = ['Lepton_Pt']\n        jetBinning = [range(0, 1500, 50), range(0, 200, 10)]\n        lepBinning = [range(0, 700, 20)]\n        x0, vlabels = load(f = path+'/Sh_228_ttbar_'+do+'_EnhMaxHTavrgTopPT_nominal.root', \n                           events = eventVars, jets = jetVars, leps = lepVars, n = int(nentries), t = 'Tree', do = do)\n        x1, vlabels = load(f = path+'/Sh_228_ttbar_'+do+'_EnhMaxHTavrgTopPT_'+var+'.root', \n                           events = eventVars, jets = jetVars, leps = lepVars, n = int(nentries), t = 'Tree', do = do)\n        binning = [range(0, 12, 1), range(0, 900, 25)]+jetBinning+jetBinning+lepBinning+lepBinning\n        if preprocessing:\n            factor = 5\n            x00 = len(x0)\n            x10 = len(x1)\n            for column in x0.columns:\n                #the outlier window is taken from the variation sample (x1) only;\n                #the x0-based limits were dead assignments and have been dropped\n                upper_lim = x1[column].mean () + x1[column].std () * factor\n                lower_lim = x1[column].mean () - x1[column].std () * factor\n                x0 = x0[(x0[column] < upper_lim) & (x0[column] > lower_lim)]\n                x1 = x1[(x1[column] < upper_lim) & (x1[column] > lower_lim)]\n            x0 = x0.round(decimals=2)\n            x1 = x1.round(decimals=2)\n            print(\"filtered x0 outliers: \", (x00-len(x0))/x00*100, \"% \")\n            print(\"filtered x1 outliers: \", (x10-len(x1))/x10*100, \"% \")\n\n\n        if correlation:\n            cor0 = x0.corr()\n            sns.heatmap(cor0, annot=True, cmap=plt.cm.Reds)\n            cor_target = abs(cor0[x0.columns[0]])\n            relevant_features = cor_target[cor_target>0.5]\n            print(\"relevant_features \", relevant_features)\n            if plot:\n                plt.savefig('plots/scatterMatrix_'+do+'_'+var+'.png')\n                plt.clf()\n\n        X0 = x0.to_numpy()\n        X1 = x1.to_numpy()\n        # combine\n        y0 = np.zeros(x0.shape[0])\n        y1 = np.ones(x1.shape[0])\n\n        X0_train, X0_test, y0_train, y0_test = train_test_split(X0, y0, test_size=0.40, random_state=42)\n        X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.40, random_state=42)\n        X0_train, X0_val, y0_train, y0_val = train_test_split(X0_train, y0_train, test_size=0.50, random_state=42)\n        X1_train, X1_val, y1_train, y1_val = train_test_split(X1_train, y1_train, test_size=0.50, random_state=42)\n        X_train = np.vstack([X0_train, X1_train])\n        y_train = np.concatenate((y0_train, y1_train), axis=None)\n        X_val = np.vstack([X0_val, X1_val])\n        y_val = np.concatenate((y0_val, y1_val), axis=None)\n        print(\"y_val, \", y_val)\n        # save data\n        if folder is not None and save:\n            np.save(folder + do + '/' + var + \"/X_train_\" +str(nentries)+\".npy\", X_train)\n            np.save(folder + do + '/' + var + \"/y_train_\" +str(nentries)+\".npy\", y_train)\n            np.save(folder + do + '/' + var + \"/X_val_\" +str(nentries)+\".npy\", X_val)\n            np.save(folder + do + '/' + var + \"/y_val_\" +str(nentries)+\".npy\", y_val)\n            np.save(folder + do + '/' + var + \"/X0_val_\" +str(nentries)+\".npy\", X0_val)\n            np.save(folder + do + '/' + var + \"/X1_val_\" +str(nentries)+\".npy\", X1_val)\n            np.save(folder + do + '/' + var + \"/X0_train_\"+str(nentries)+\".npy\", X0_train)\n            np.save(folder + do + '/' + var + \"/X1_train_\"+str(nentries)+\".npy\", X1_train)\n            #Tar data files if 
training is done on GPU\n if torch.cuda.is_available():\n plot = False #don't plot on GPU...\n tar = tarfile.open(\"data_out.tar.gz\", \"w:gz\")\n for name in [folder + do + '/' + var + \"/X_train_\" +str(nentries)+\".npy\", \n folder + do + '/' + var + \"/y_train_\" +str(nentries)+\".npy\",\n folder + do + '/' + var + \"/X_val_\" +str(nentries)+\".npy\",\n folder + do + '/' + var + \"/y_val_\" +str(nentries)+\".npy\",\n folder + do + '/' + var + \"/X0_val_\" +str(nentries)+\".npy\",\n folder + do + '/' + var + \"/X1_val_\" +str(nentries)+\".npy\",\n folder + do + '/' + var + \"/X0_train_\"+str(nentries)+\".npy\",\n folder + do + '/' + var + \"/X1_train_\"+str(nentries)+\".npy\"]:\n tar.add(name)\n tar.close()\n\n if plot and int(nentries) > 10000: # no point in plotting distributions with too few events\n draw_unweighted_distributions(X0, X1, np.ones(X0[:,0].size), x0.columns, vlabels, binning, var, do, nentries, plot) \n print(\"saving plots\")\n return X_train, y_train, X0_train, X1_train\n\n def load_result(\n self,\n x0,\n x1,\n weights = None,\n label = None,\n do = 'dilepton',\n var = 'qsf',\n plot = False,\n n = 0,\n path = '',\n ):\n \"\"\"\n Parameters\n ----------\n weights : ndarray\n r_hat weights:\n Returns\n -------\n \"\"\"\n eventVars = ['Njets', 'MET']\n jetVars = ['Jet_Pt', 'Jet_Mass']\n lepVars = ['Lepton_Pt']\n etaJ = [-2.8,-2.4,-2,-1.6,-1.2,-0.8,-0.4,0,0.4,0.8,1.2,1.6,2,2.4,2.8]\n jetBinning = [range(0, 1500, 50), range(0, 200, 10)]\n lepBinning = [range(0, 700, 20)]\n\n binning = [range(0, 12, 1), range(0, 900, 25)]+jetBinning+jetBinning+lepBinning+lepBinning\n x0df, labels = load(f = path+'/Sh_228_ttbar_'+do+'_EnhMaxHTavrgTopPT_nominal.root', \n events = eventVars, jets = jetVars, leps = lepVars, n = 1, t = 'Tree')\n # load samples\n X0 = load_and_check(x0, memmap_files_larger_than_gb=1.0)\n X1 = load_and_check(x1, memmap_files_larger_than_gb=1.0)\n weights = weights / weights.sum() * len(X1)\n if int(n) > 10000: # no point in plotting distributions with too few events, they only look bad \n # plot ROC curves \n draw_ROC(X0, X1, weights, label, var, do, plot)\n # plot reweighted distributions \n draw_weighted_distributions(X0, X1, weights, x0df.columns, labels, binning, label, var, do, n, plot) \n\n def load_calibration(\n self,\n y_true,\n p1_raw = None,\n p1_cal = None,\n label = None,\n do = 'dilepton',\n var = 'qsf',\n plot = False\n ):\n \"\"\"\n Parameters\n ----------\n y_true : ndarray\n true targets\n p1_raw : ndarray\n uncalibrated probabilities of the positive class\n p1_cal : ndarray\n calibrated probabilities of the positive class\n Returns\n -------\n \"\"\"\n\n # load samples\n y_true = load_and_check(y_true, memmap_files_larger_than_gb=1.0)\n plot_calibration_curve(y_true, p1_raw, p1_cal, do, var, plot) \n", "sub_path": "network/utils/loading.py", "file_name": "loading.py", "file_ext": "py", "file_size_in_byte": 10048, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "tools.create_missing_folders", "line_number": 76, "usage_type": "call"}, {"api_name": "tools.create_missing_folders", "line_number": 77, "usage_type": "call"}, {"api_name": "tools.load", "line_number": 85, "usage_type": "call"}, {"api_name": "tools.load", "line_number": 87, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 109, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 145, "usage_type": "call"}, {"api_name": "plotting.draw_unweighted_distributions", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 158, "usage_type": "call"}, {"api_name": "tools.load", "line_number": 190, "usage_type": "call"}, {"api_name": "tools.load_and_check", "line_number": 193, "usage_type": "call"}, {"api_name": "tools.load_and_check", "line_number": 194, "usage_type": "call"}, {"api_name": "plotting.draw_ROC", "line_number": 198, "usage_type": "call"}, {"api_name": "plotting.draw_weighted_distributions", "line_number": 200, "usage_type": "call"}, {"api_name": "tools.load_and_check", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "242237273", "text": "import os, logging\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom flask import flash, current_app, render_template\n\ndef send_mime_email(recipients, text_body, image_file, html_body):\n SENDER = current_app.config['MAIL_USERNAME']\n current_app.logger.setLevel(logging.INFO)\n current_app.logger.info('Sending MIME email')\n RECIPIENT = recipients\n\n # If necessary, replace us-west-2 with the AWS Region you're using for Amazon SES.\n AWS_REGION = \"us-east-1\"\n\n SUBJECT = \"You Rack Discipline\"\n BODY_TEXT = text_body\n BODY_HTML = html_body\n\n # The character encoding for the email.\n CHARSET 
= \"UTF-8\"\n\n # Create a new SES resource and specify a region.\n client = boto3.client('ses', region_name=AWS_REGION)\n\n # The full path to the file that will be attached to the email.\n ATTACHMENT = os.path.join(current_app.config['UPLOAD_FOLDER'], image_file)\n\n # Create a multipart/mixed parent container.\n msg = MIMEMultipart('mixed')\n # Add subject, from and to lines.\n msg['Subject'] = SUBJECT\n msg['From'] = SENDER\n msg['To'] = ', '.join(recipients)\n\n # Encode the text and HTML content and set the character encoding. This step is\n # necessary if you're sending a message with characters outside the ASCII range.\n textpart = MIMEText(BODY_TEXT.encode(CHARSET), 'plain', CHARSET)\n htmlpart = MIMEText(BODY_HTML.encode(CHARSET), 'html', CHARSET)\n\n # Encapsulate the plain and HTML versions of the message body in an\n # 'alternative' part, so message agents can decide which they want to display.\n msgAlternative = MIMEMultipart('alternative')\n msgAlternative.attach(textpart)\n msgAlternative.attach(htmlpart)\n # Attach the multipart/alternative child container to the multipart/mixed parent container.\n msg.attach(msgAlternative)\n\n # Define the attachment part and encode it using MIMEImage.\n try:\n fp = open(ATTACHMENT, 'rb')\n att = MIMEImage(fp.read())\n fp.close()\n # Add a header to tell the email client to treat this part as an attachment,\n # and to give the attachment a name.\n att.add_header('Content-ID', image_file)\n # Add the attachment to the parent container.\n msg.attach(att)\n except IOError as e:\n flash('Looks like the file is missing!')\n flash(e)\n\n try:\n #Provide the contents of the email.\n response = client.send_raw_email(\n Source=SENDER,\n Destinations=RECIPIENT,\n RawMessage={\n 'Data':msg.as_string(),\n },\n )\n\n except ClientError as e:\n flash(e.response['Error']['Message'])\n current_app.logger.setLevel(logging.ERROR)\n current_app.logger.info('Error sending email:', e.response['Error']['Code'])\n current_app.logger.info(' :', e.response['Error']['Message'])\n current_app.logger.info(' :', e.response['Error']['Type'])\n else:\n flash(\"Email sent! Message ID:\" + response['MessageId'])\n\n return\n\ndef send_image_email(recipients, text_body, html_body):\n SENDER = current_app.config['MAIL_USERNAME']\n current_app.logger.setLevel(logging.INFO)\n current_app.logger.info('Sending inline email')\n RECIPIENT = recipients\n\n # If necessary, replace us-west-2 with the AWS Region you're using for Amazon SES.\n AWS_REGION = \"us-east-1\"\n\n SUBJECT = \"You Rack Discipline Too\"\n BODY_TEXT = text_body\n BODY_HTML = html_body\n\n # The character encoding for the email.\n CHARSET = \"UTF-8\"\n\n # Create a new SES resource and specify a region.\n client = boto3.client('ses', region_name=AWS_REGION)\n\n try:\n #Provide the contents of the email.\n response = client.send_email(\n Source=SENDER,\n Destination={\n 'ToAddresses': RECIPIENT\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': CHARSET,\n 'Data': BODY_HTML,\n },\n 'Text': {\n 'Charset': CHARSET,\n 'Data': BODY_TEXT,\n },\n },\n 'Subject': {\n 'Charset': CHARSET,\n 'Data': SUBJECT,\n },\n },\n )\n # Display an error if something goes wrong.\n except ClientError as e:\n flash(e.response['Error']['Message'])\n current_app.logger.setLevel(logging.ERROR)\n current_app.logger.info('Error sending email:', e.response['Error']['Code'])\n current_app.logger.info(' :', e.response['Error']['Message'])\n current_app.logger.info(' :', e.response['Error']['Type'])\n else:\n flash(\"Email sent! 
Message ID:\" + response['MessageId'])\n\n return\n\ndef send_password_reset_email(user):\n token = user.get_reset_password_token()\n SENDER = \"yourackdiscipline@gmail.com\"\n current_app.logger.setLevel(logging.INFO)\n current_app.logger.info('Sending password reset email')\n\n RECIPIENT = user.Email\n\n # If necessary, replace us-west-2 with the AWS Region you're using for Amazon SES.\n AWS_REGION = \"us-east-1\"\n\n # The subject line for the email.\n SUBJECT = \"You Rack Discipline Password Reset\"\n BODY_TEXT = render_template('email/reset_password.txt', user=user, token=token)\n BODY_HTML = render_template('email/reset_password.html', user=user, token=token)\n\n # The character encoding for the email.\n CHARSET = \"UTF-8\"\n\n # Create a new SES resource and specify a region.\n client = boto3.client('ses', region_name=AWS_REGION)\n\n try:\n # Provide the contents of the email.\n response = client.send_email(\n Source=SENDER,\n Destination={\n 'ToAddresses': [\n RECIPIENT,\n ],\n },\n Message={\n 'Body': {\n 'Html': {\n 'Charset': CHARSET,\n 'Data': BODY_HTML,\n },\n 'Text': {\n 'Charset': CHARSET,\n 'Data': BODY_TEXT,\n },\n },\n 'Subject': {\n 'Charset': CHARSET,\n 'Data': SUBJECT,\n },\n },\n )\n # Display an error if something goes wrong.\n except ClientError as e:\n current_app.logger.setLevel(logging.ERROR)\n current_app.logger.info('Error sending email:', e.response['Error']['Code'])\n current_app.logger.info(' :', e.response['Error']['Message'])\n current_app.logger.info(' :', e.response['Error']['Type'])\n\n return\n\n", "sub_path": "team9/email/email.py", "file_name": "email.py", "file_ext": "py", "file_size_in_byte": 6975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.current_app.config", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 12, "usage_type": "name"}, {"api_name": "boto3.client", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.current_app.config", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 29, "usage_type": "name"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 32, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 40, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 41, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 45, "usage_type": "call"}, {"api_name": "email.mime.image.MIMEImage", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 63, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 75, 
"usage_type": "name"}, {"api_name": "flask.flash", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 77, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 88, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 89, "usage_type": "name"}, {"api_name": "boto3.client", "line_number": 103, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 132, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.current_app.logger", 
"line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 144, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 155, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 161, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 190, "usage_type": "name"}, {"api_name": "flask.current_app.logger.setLevel", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 191, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.current_app.logger.info", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 192, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 192, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 193, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 193, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 193, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "4501527", "text": "from help import usage\nfrom trains import stations, trains, routes\n\nimport os\nimport json\nimport pytz\nfrom datetime import datetime, timedelta\n\nimport requests\nimport lxml.html\n\nimport geopy.distance\n\n# from apigw import request, response, access, gateway_request, gateway_response\nfrom xinetd import request, response, access, gateway_request, gateway_response\n\n\ndef unix_time_stamp(stamp, hhmm):\n\n val = hhmm.split(':')\n\n if len(val) != 2:\n return 0\n\n hh = int(val[0])\n mm = int(val[1])\n\n if hh < 0 or hh > 23 or mm < 0 or mm > 59:\n return 0\n\n t_init = datetime(stamp.year, stamp.month, stamp.day, hh, mm)\n\n time_eu = pytz.timezone('Europe/Berlin').localize(t_init).timestamp()\n\n return int(time_eu) + (86400 if (stamp.hour - hh) > 1 else 0)\n\n\ndef train_direction(route, current_station, destination):\n\n if route in routes:\n try:\n destination = destination.replace(' tief', '')\n curr_index = routes[route].index(current_station)\n dest_index = routes[route].index(destination)\n\n if curr_index < dest_index:\n return routes[route][-1]\n else:\n return routes[route][0]\n\n except ValueError:\n return ''\n\n\ndef scrape_rmv():\n\n station_id = request.params.get('station_id', None)\n\n if not station_id or station_id not in stations:\n return response({'message': 'Invalid station ID'}, 400)\n\n train_ids = request.params.get('trains', None)\n trains_list = train_ids.upper().split(',') if train_ids else []\n\n if trains_list and not all(train in trains for train in trains_list):\n return response({'message': 'Invalid train ID(s)'}, 400)\n\n try:\n max_num = 
int(request.params.get('maxnum', 0))\n except ValueError:\n max_num = 0\n\n if (max_num < 1):\n max_num = 20\n elif (max_num > 50):\n max_num = 50\n\n try:\n time_stamp = int(request.params.get('timestamp', 0))\n except ValueError:\n time_stamp = 0\n\n if (time_stamp < 1514764800):\n stamp = datetime.now(tz=pytz.timezone('Europe/Berlin'))\n elif (time_stamp < 201801010000):\n stamp = datetime.fromtimestamp(time_stamp, pytz.timezone('Europe/Berlin'))\n else:\n stamp = datetime.strptime(str(time_stamp), '%Y%m%d%H%M')\n\n url = 'https://www.rmv.de/auskunft/bin/jp/stboard.exe/dn'\n\n query = {\n 'protocol': 'https:',\n 'input': station_id,\n 'boardType': 'dep',\n 'time': stamp.strftime('%H:%M'),\n 'maxJourneys': 50,\n 'selectDate': 'today',\n 'dateBegin': None,\n 'dateEnd': None,\n 'productsFilter': '0001',\n 'start': 'yes',\n 'pageViewMode': 'PRINT'\n }\n\n today = datetime.now(tz=pytz.timezone('Europe/Berlin')).strftime('%d.%m.%Y')\n request_date = stamp.strftime('%d.%m.%Y')\n\n if request_date != today:\n query['selectDate'] = 'period'\n query['dateBegin'] = request_date\n query['dateEnd'] = (stamp + timedelta(days=1)).strftime('%d.%m.%Y')\n\n xp_trains = '//table/tbody/tr[td/@class=\"product\"]'\n xp_routes = 'string(./td[@class=\"product\"])'\n xp_dest = 'string(./td[@class=\"timetable\"]/strong)'\n xp_platform = './td[@class=\"platform\"]//text()'\n xp_platform_change = './td[@class=\"platform\"]//img'\n xp_sched = 'string(./td[@class=\"time\"]/text())'\n xp_progn = 'string(./td[@class=\"time\"]/span[@class=\"prognosis\"])'\n xp_info = './following-sibling::tr[1]//div[@class=\"journeyMessageHIM\"]'\n xp_info1 = 'string(.//strong)'\n xp_info2 = './span/text()'\n\n train_count = 0\n\n try:\n rmv_response = requests.get(url, params=query, timeout=5)\n except requests.exceptions.RequestException:\n return response({'message': 'Invalid response from RMV'}, 503)\n\n if rmv_response.status_code != 200 or not rmv_response.content:\n return response({'message': 'Invalid response from RMV'}, 502)\n\n schedule = {\n 'timestamp': int(stamp.timestamp()),\n 'station_name': stations[station_id][0],\n 'url': rmv_response.url,\n 'train_info': []\n }\n\n html_tree = lxml.html.fromstring(rmv_response.content)\n html_trains = html_tree.xpath(xp_trains)\n\n for train in html_trains:\n route = train.xpath(xp_routes).strip('\\r\\n ').replace(' ', '')\n destination = train.xpath(xp_dest).strip('\\r\\n ').replace('\\n', ' ')\n direction = train_direction(route, stations[station_id][0], destination)\n\n if (trains_list and\n direction not in trains_list and\n route not in trains_list and\n route + direction not in trains_list):\n continue\n\n platform = ''\n platform_info = train.xpath(xp_platform)\n if platform_info:\n for p in platform_info:\n p = p.strip('\\r\\n \\xc2\\xa0').replace('\\n', ' ')\n if p:\n platform = p\n break\n\n platform_change = train.xpath(xp_platform_change)\n if platform_change:\n platform = platform + '*'\n\n info = []\n info_tables = train.xpath(xp_info)\n\n for info_table in info_tables:\n infos = info_table.xpath(xp_info1).strip('\\r\\n \\xc2\\xa0')\n\n if infos:\n info_extras = info_table.xpath(xp_info2)\n\n if info_extras:\n info_extra = []\n\n for extra in info_extras:\n extra = extra.strip('\\r\\n \\xc2\\xa0')\n\n if extra:\n info_extra.append(extra)\n\n if info_extra:\n infos = infos + '\\n' + '\\n'.join(info_extra)\n info.append(infos)\n\n scheduled_str = train.xpath(xp_sched).strip('\\r\\n ')\n prognosis_str = train.xpath(xp_progn).strip('\\r\\n \\xc2\\xa0')\n\n scheduled = 
unix_time_stamp(stamp, scheduled_str)\n\n if 'fällt' in prognosis_str:\n prognosis = -1\n elif 'pünktlich' in prognosis_str:\n prognosis = scheduled\n else:\n prognosis = unix_time_stamp(stamp, prognosis_str)\n\n schedule['train_info'].append({\n 'route': route,\n 'direction': direction,\n 'destination': destination,\n 'platform': platform,\n 'scheduled': scheduled,\n 'prognosis': prognosis,\n 'info': info\n })\n\n train_count += 1\n\n if train_count >= max_num:\n break\n\n return response(schedule, 200)\n\n\ndef station_by_coords():\n\n try:\n coordinates = request.params.get('coordinates', '').split(',')\n\n if len(coordinates) != 2:\n raise ValueError\n\n coordinates = [float(c) for c in coordinates]\n\n if coordinates[0] < -90 or coordinates[0] > 90:\n raise ValueError\n\n if coordinates[1] < -180 or coordinates[1] > 180:\n raise ValueError\n\n except ValueError:\n return response({'message': 'Invalid coordinates'}, 400)\n\n try:\n max_num = int(request.params.get('maxnum', 0))\n except ValueError:\n max_num = 0\n\n if (max_num < 1):\n max_num = 3\n elif (max_num > 20):\n max_num = 20\n\n distance = {station: int(geopy.distance.distance(v[1], coordinates).m)\n for station, v in stations.items()}\n\n closest = sorted(distance, key=distance.__getitem__)[:max_num]\n\n return response([{\n 'station_id': int(k),\n 'station_name': stations[k][0],\n 'distance': distance[k]\n } for k in closest], 200)\n\n\ndef generic_response(original_handler):\n\n def generic_response_handler(event, context):\n\n resp = original_handler(event, context)\n\n status_code = getattr(resp, 'status_code', 200)\n resp_body = getattr(resp, 'body', None)\n resp_headers = {}\n\n response_object = {\n 'status_code': status_code\n }\n\n if resp_body and isinstance(resp_body, str) and resp_body.startswith(''):\n\n resp_headers['Content-Type'] = 'text/html'\n response_object['body'] = resp_body\n\n else:\n\n if request.method == 'OPTIONS':\n resp_headers['Access-Control-Allow-Methods'] = 'OPTIONS,GET'\n resp_headers['Access-Control-Allow-Headers'] = 'Content-Type,X-Api-Key'\n\n if request.method in ('HEAD', 'OPTIONS', 'GET'):\n resp_headers['Access-Control-Allow-Origin'] = '*'\n\n if resp_body and request.method != 'HEAD':\n\n callback = request.params.get('callback', None)\n\n resp_json = json.dumps(\n resp_body,\n indent=(4 if status_code == 200 and not callback else None)\n )\n\n if '{HOST}' in resp_json:\n\n host = request.headers.get('X-Forwarded-Host', None)\n\n if not host:\n host = request.headers.get('Host', 'api.hostname.com')\n\n for r in (('{HOST}', host), ('{VER}', request.prefix + request.stage)):\n resp_json = resp_json.replace(*r)\n\n if callback:\n resp_json = str(callback) + '(' + resp_json + ')'\n\n resp_headers['Content-Type'] = 'application/json'\n response_object['body'] = resp_json\n\n response_object['headers'] = resp_headers\n\n return response_object\n\n return generic_response_handler\n\n\n@gateway_request\n@gateway_response\n@generic_response\ndef http_request_handler(event, context):\n\n if request.version not in ('HTTP/1.0', 'HTTP/1.1'):\n request.version = 'HTTP/1.0'\n return response({'message': 'Unsupported protocol'}, 505)\n\n if not access():\n return response({'message': 'Access forbidden'}, 403)\n\n if request.method == 'GET' and request.resource in ('home', 'sbahn'):\n try:\n cwd = os.path.dirname(os.path.realpath(__file__))\n with open('{}/{}.html'.format(cwd, request.resource)) as f:\n body = f.read()\n return response(body, 200)\n except Exception:\n return response(None, 404)\n\n 
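#note: browsers request /favicon.ico on their own; answering 404 here keeps the\n    #request from falling through to the stage check below and being reported as 501\n    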
if request.method == 'GET' and request.stage == 'favicon.ico':\n        return response(None, 404)\n\n    if request.method not in ('GET', 'HEAD', 'OPTIONS'):\n        return response({'message': 'Method not allowed'}, 405)\n\n    if request.stage not in ('v3',):\n        return response({'message': 'Unknown API version'}, 501)\n\n    if request.resource not in (\n            'index', 'schedule', 'location', 'stations', 'trains', 'routes'):\n        return response({'message': 'Resource does not exist.'}, 404)\n\n    if request.method == 'HEAD':\n        return response(None, 200)\n\n    if request.resource == 'index' or (\n            request.resource in ('schedule', 'location') and not request.params):\n        return response(usage[request.resource], 200)\n\n    if request.method == 'OPTIONS':\n        return response(None, 200)\n\n    if request.resource == 'schedule':\n        return scrape_rmv()\n\n    if request.resource == 'location':\n        return station_by_coords()\n\n    if request.resource in ('stations', 'trains', 'routes'):\n        return response(globals()[request.resource], 200)\n\n    return response({'message': 'Invalid parameters passed'}, 400)\n", "sub_path": "rmv.py", "file_name": "rmv.py", "file_ext": "py", "file_size_in_byte": 11360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime", "line_number": 31, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 33, "usage_type": "call"}, {"api_name": "trains.routes", "line_number": 40, "usage_type": "name"}, {"api_name": "trains.routes", "line_number": 43, "usage_type": "name"}, {"api_name": "trains.routes", "line_number": 44, "usage_type": "name"}, {"api_name": "trains.routes", "line_number": 47, "usage_type": "name"}, {"api_name": "trains.routes", "line_number": 49, "usage_type": "name"}, {"api_name": "xinetd.request.params.get", "line_number": 57, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 57, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 57, "usage_type": "name"}, {"api_name": "trains.stations", "line_number": 59, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 60, "usage_type": "call"}, {"api_name": "xinetd.request.params.get", "line_number": 62, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 62, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 62, "usage_type": "name"}, {"api_name": "trains.trains", "line_number": 65, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 66, "usage_type": "call"}, {"api_name": "xinetd.request.params.get", "line_number": 69, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 69, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 69, "usage_type": "name"}, {"api_name": "xinetd.request.params.get", "line_number": 79, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 79, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 79, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", 
"line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 112, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 128, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 129, "usage_type": "attribute"}, {"api_name": "xinetd.response", "line_number": 130, "usage_type": "call"}, {"api_name": "xinetd.response", "line_number": 133, "usage_type": "call"}, {"api_name": "trains.stations", "line_number": 137, "usage_type": "name"}, {"api_name": "lxml.html.html.fromstring", "line_number": 142, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 142, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 142, "usage_type": "name"}, {"api_name": "trains.stations", "line_number": 148, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 218, "usage_type": "call"}, {"api_name": "xinetd.request.params.get", "line_number": 224, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 224, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 224, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 238, "usage_type": "call"}, {"api_name": "xinetd.request.params.get", "line_number": 241, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 241, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 241, "usage_type": "name"}, {"api_name": "geopy.distance.distance.distance", "line_number": 250, "usage_type": "call"}, {"api_name": "geopy.distance.distance", "line_number": 250, "usage_type": "attribute"}, {"api_name": "geopy.distance", "line_number": 250, "usage_type": "name"}, {"api_name": "trains.stations.items", "line_number": 251, "usage_type": "call"}, {"api_name": "trains.stations", "line_number": 251, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 255, "usage_type": "call"}, {"api_name": "trains.stations", "line_number": 257, "usage_type": "name"}, {"api_name": "xinetd.request.method", "line_number": 283, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 283, "usage_type": "name"}, {"api_name": "xinetd.request.method", "line_number": 287, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 287, "usage_type": "name"}, {"api_name": "xinetd.request.method", "line_number": 290, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 290, "usage_type": "name"}, {"api_name": "xinetd.request.params.get", "line_number": 292, "usage_type": "call"}, {"api_name": "xinetd.request.params", "line_number": 292, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 292, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 294, "usage_type": "call"}, {"api_name": "xinetd.request.headers.get", "line_number": 301, "usage_type": "call"}, {"api_name": "xinetd.request.headers", "line_number": 301, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 301, "usage_type": "name"}, {"api_name": "xinetd.request.headers.get", "line_number": 304, "usage_type": "call"}, {"api_name": "xinetd.request.headers", "line_number": 304, "usage_type": "attribute"}, {"api_name": 
"xinetd.request", "line_number": 304, "usage_type": "name"}, {"api_name": "xinetd.request.prefix", "line_number": 306, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 306, "usage_type": "name"}, {"api_name": "xinetd.request.stage", "line_number": 306, "usage_type": "attribute"}, {"api_name": "xinetd.request.version", "line_number": 327, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 327, "usage_type": "name"}, {"api_name": "xinetd.request.version", "line_number": 328, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 328, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 329, "usage_type": "call"}, {"api_name": "xinetd.access", "line_number": 331, "usage_type": "call"}, {"api_name": "xinetd.response", "line_number": 332, "usage_type": "call"}, {"api_name": "xinetd.request.method", "line_number": 334, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 334, "usage_type": "name"}, {"api_name": "xinetd.request.resource", "line_number": 334, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 336, "usage_type": "call"}, {"api_name": "xinetd.request.resource", "line_number": 337, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 337, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 339, "usage_type": "call"}, {"api_name": "xinetd.response", "line_number": 341, "usage_type": "call"}, {"api_name": "xinetd.request.method", "line_number": 343, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 343, "usage_type": "name"}, {"api_name": "xinetd.request.stage", "line_number": 343, "usage_type": "attribute"}, {"api_name": "xinetd.response", "line_number": 344, "usage_type": "call"}, {"api_name": "xinetd.request.method", "line_number": 346, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 346, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 347, "usage_type": "call"}, {"api_name": "xinetd.request.stage", "line_number": 349, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 349, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 350, "usage_type": "call"}, {"api_name": "xinetd.request.resource", "line_number": 352, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 352, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 354, "usage_type": "call"}, {"api_name": "xinetd.request.method", "line_number": 356, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 356, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 357, "usage_type": "call"}, {"api_name": "xinetd.request.resource", "line_number": 359, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 359, "usage_type": "name"}, {"api_name": "xinetd.request.resource", "line_number": 360, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 360, "usage_type": "name"}, {"api_name": "xinetd.request.params", "line_number": 360, "usage_type": "attribute"}, {"api_name": "xinetd.response", "line_number": 361, "usage_type": "call"}, {"api_name": "help.usage", "line_number": 361, "usage_type": "name"}, {"api_name": "xinetd.request.resource", "line_number": 361, "usage_type": "attribute"}, {"api_name": 
"xinetd.request", "line_number": 361, "usage_type": "name"}, {"api_name": "xinetd.request.method", "line_number": 363, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 363, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 364, "usage_type": "call"}, {"api_name": "xinetd.request.resource", "line_number": 366, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 366, "usage_type": "name"}, {"api_name": "xinetd.request.resource", "line_number": 369, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 369, "usage_type": "name"}, {"api_name": "xinetd.request.resource", "line_number": 372, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 372, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 373, "usage_type": "call"}, {"api_name": "xinetd.request.resource", "line_number": 373, "usage_type": "attribute"}, {"api_name": "xinetd.request", "line_number": 373, "usage_type": "name"}, {"api_name": "xinetd.response", "line_number": 375, "usage_type": "call"}, {"api_name": "xinetd.gateway_request", "line_number": 322, "usage_type": "name"}, {"api_name": "xinetd.gateway_response", "line_number": 323, "usage_type": "name"}]} +{"seq_id": "440770286", "text": "from jinja2.filters import do_striptags\nfrom jingo.helpers import fe\nfrom tower import ugettext as _\n\nfrom activity import ActionFormatter\nfrom users.helpers import display_name, profile_url\n\n\nclass ForumReplyFormatter(ActionFormatter):\n def __init__(self, action):\n title = _(u'{user} replied to '\n u'{thread}')\n self.action = action\n self.post = action.content_object\n self.title = fe(title, profile_url=profile_url(self.action.creator),\n user=display_name(self.action.creator),\n post_url=self.action.url,\n thread=self.post.thread.title)\n # 225 was determined by experiment. 
Feel free to change if the\n        # layout changes.\n        self.content = self.post.content[0:225]\n\n    def __unicode__(self):\n        return do_striptags(self.title)\n", "sub_path": "apps/forums/formatters.py", "file_name": "formatters.py", "file_ext": "py", "file_size_in_byte": 923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "activity.ActionFormatter", "line_number": 9, "usage_type": "name"}, {"api_name": "tower.ugettext", "line_number": 11, "usage_type": "call"}, {"api_name": "jingo.helpers.fe", "line_number": 15, "usage_type": "call"}, {"api_name": "users.helpers.profile_url", "line_number": 15, "usage_type": "call"}, {"api_name": "users.helpers.display_name", "line_number": 16, "usage_type": "call"}, {"api_name": "jinja2.filters.do_striptags", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "232440743", "text": "#\n# Copyright 2018 Shawn Lin\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\n\n# Return related statistics\ndef total_return(returns):\n    \"\"\"\n    Compute total return from simple returns.\n\n    Parameters\n    ----------\n    returns : pd.Series of periodic returns\n\n    Returns\n    -------\n    total_return : float or pd.Series\n        Total compounded return (one value per column for DataFrame input).\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compound the whole series into a single total return\n    result = result.add(1, fill_value=0)\n    result = result.prod(skipna=True)\n    result = result - 1\n\n    return result\n\n\ndef cum_return(returns):\n    \"\"\"\n    Compute cumulative returns from simple returns.\n\n    Parameters\n    ----------\n    returns : pd.Series of periodic returns\n\n    Returns\n    -------\n    cumulative_returns : array-like\n        Series of cumulative returns.\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compute cumulative return\n    result = result.add(1, fill_value=0)\n    result = result.cumprod(skipna=True)\n    result = result.add(-1)\n\n    return result\n\n\ndef vami(returns, starting_value=1000):\n    \"\"\"\n    Compute VAMI (Value Added Monthly Index) from simple returns.\n\n    Parameters\n    ----------\n    returns : pd.Series, np.ndarray, or pd.DataFrame\n        Returns of the strategy as a percentage, noncumulative.\n        - Time series with decimal returns.\n        - Example::\n            2015-07-16   -0.012143\n            2015-07-17    0.045350\n            2015-07-20    0.030957\n            2015-07-21    0.004902\n        - Also accepts two dimensional data. 
In this case, each column is\n          cumulated.\n    starting_value: float, optional\n        The starting value of the index.\n\n    Returns\n    -------\n    vami : array-like\n        Series of index values.\n    \"\"\"\n    result = cum_return(returns)\n    result = result.add(1)\n    result = result.multiply(starting_value)\n\n    return result\n\n\ndef weekly_return(returns):\n    \"\"\"\n    Compute weekly returns from higher frequency returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns with periodicity shorter than a week\n\n    Returns\n    -------\n    weekly_returns : array-like\n        Series of weekly returns.\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compound returns within each period\n    result = result.add(1, fill_value=0)\n    result = result.groupby(pd.Grouper(freq=\"W\")).prod()\n    result = result - 1\n\n    return result\n\n\ndef monthly_return(returns):\n    \"\"\"\n    Compute monthly returns from higher frequency returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns with periodicity shorter than a month\n\n    Returns\n    -------\n    monthly_returns : array-like\n        Series of monthly returns.\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compound returns within each period\n    result = result.add(1, fill_value=0)\n    result = result.groupby(pd.Grouper(freq=\"M\")).prod()\n    result = result - 1\n\n    return result\n\n\ndef quarterly_return(returns):\n    \"\"\"\n    Compute quarterly returns from higher frequency returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns with periodicity shorter than a quarter\n\n    Returns\n    -------\n    quarterly_returns : array-like\n        Series of quarterly returns.\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compound returns within each period\n    result = result.add(1, fill_value=0)\n    result = result.groupby(pd.Grouper(freq=\"Q\")).prod()\n    result = result - 1\n\n    return result\n\n\ndef annual_return(returns):\n    \"\"\"\n    Compute annual returns from higher frequency returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns with periodicity shorter than a year\n\n    Returns\n    -------\n    annual_returns : array-like\n        Series of annual returns.\n    \"\"\"\n    if len(returns) < 1:\n        return returns.copy()\n\n    # Allocate Memory\n    result = returns.copy()\n\n    # Compound returns within each period\n    result = result.add(1, fill_value=0)\n    result = result.groupby(pd.Grouper(freq=\"A\")).prod()\n    result = result - 1\n\n    return result\n\n\ndef period_return(returns, period):\n    \"\"\"\n    Convert higher frequency returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns with higher frequency than the target\n        periodicity\n    period : the target periodicity to convert the returns to, options are\n        - week\n        - month\n        - quarter\n        - year\n\n    Returns\n    -------\n    period_returns : array-like\n        Series of returns at the requested periodicity.\n    \"\"\"\n    return_func = {\n        \"week\": weekly_return,\n        \"month\": monthly_return,\n        \"quarter\": quarterly_return,\n        \"year\": annual_return,\n    }\n\n    # dispatch to the converter matching the requested periodicity\n    result = return_func[period](returns)\n\n    return result\n\n\ndef annualized_return(returns, start_date=None, end_date=None):\n    \"\"\"\n    Convert periodic returns into annualized return\n\n    Parameters\n    ----------\n    returns : pd.Series of returns\n    start_date, end_date : string in %Y-%m-%d format. Defaults to None. If given, use\n        start or end as the start or end date of the series. This is useful for series\n        that's already in a lower frequency (e.g. 
monthly returns) but the exact\n        start or end dates are known. Providing start/end dates in this case will generate\n        more accurate annualized returns.\n\n    Returns\n    -------\n    annualized_return : float or pd.Series\n        Annualized (compound) return.\n    \"\"\"\n    result = returns.copy()\n\n    if start_date is None:\n        start_date = returns.index[0]\n    else:\n        start_date = datetime.strptime(start_date, \"%Y-%m-%d\")\n    if end_date is None:\n        end_date = returns.index[-1]\n    else:\n        end_date = datetime.strptime(end_date, \"%Y-%m-%d\")\n\n    diff_in_years = ((end_date - start_date).total_seconds() / timedelta(days=365.25).total_seconds())\n\n    result = total_return(result)\n    result = ((result + 1) ** (1 / diff_in_years)) - 1\n\n    return result\n\n\n# Risk related statistics\ndef annualized_std(returns, start_date=None, end_date=None):\n    \"\"\"\n    Compute annualized standard deviation (volatility) from periodic returns\n\n    Parameters\n    ----------\n    returns : pd.Series of returns\n    start_date, end_date : string in %Y-%m-%d format. Defaults to None. If given, use\n        start or end as the start or end date of the series. This is useful for series\n        that's already in a lower frequency (e.g. monthly returns) but the exact\n        start or end dates are known. Providing start/end dates in this case will generate\n        more accurate annualized standard deviations.\n\n    Returns\n    -------\n    annualized_std : float or pd.Series\n        Annualized standard deviation (volatility).\n    \"\"\"\n    result = returns.copy()\n\n    if start_date is None:\n        start_date = returns.index[0]\n    else:\n        start_date = datetime.strptime(start_date, \"%Y-%m-%d\")\n    if end_date is None:\n        end_date = returns.index[-1]\n    else:\n        end_date = datetime.strptime(end_date, \"%Y-%m-%d\")\n\n    diff_in_years = ((end_date - start_date).total_seconds() / timedelta(days=365.25).total_seconds())\n\n    result = result.std() * ((result.count() / diff_in_years) ** 0.5)\n\n    return result\n", "sub_path": "opat/stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 7966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.Grouper", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.Grouper", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.Grouper", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.Grouper", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 269, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 269, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 273, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 273, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 275, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 307, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 307, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 311, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 311, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 313, "usage_type": "call"}]} +{"seq_id": "247358928", "text": "\"\"\"\nCreate, read, update and delete data (CRUD demo)\n\"\"\"\nfrom flask import Flask\nimport config\n\n# Import the database ORM\nfrom flask_sqlalchemy import SQLAlchemy\n\n# Initialize a Flask application object\n# The name argument helps Flask locate resources\napp = Flask(__name__)\n# Load the settings from the config file\napp.config.from_object(config)\n# Create the SQLAlchemy object\ndb = SQLAlchemy(app)\n\n\n# Test whether the database configuration works\n# db.create_all()\n\n# 
Create the article table\nclass Article(db.Model):\n    __tablename__ = 'article'\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    title = db.Column(db.String(100), nullable=False)\n    content = db.Column(db.Text, nullable=False)\n\n\n# Create the database tables\ndb.create_all()\n\n\n# Insert data\n@app.route('/add')\ndef add():\n    art1 = Article(title='aaa', content='bbb')\n    db.session.add(art1)\n    db.session.commit()\n    return 'haha'\n\n\n# Query data\n@app.route('/find')\ndef find():\n    # Query via the session\n    # art1 = db.session.query(Article).all()[0]\n    # return art1.title\n\n    # Query via the Article model class\n    atr1 = Article.query.filter_by(title='aaa').first()\n    return atr1.title\n\n\n# Update data\n@app.route('/change')\ndef update():\n    atr1 = Article.query.filter_by(title='aaa').first()\n    atr1.title = 'zhangsan'\n    db.session.commit()\n    return atr1.title\n\n\n# Delete data\n@app.route('/delete')\ndef delete():\n    atr1 = Article.query.filter_by(title='zhangsan').first()\n    print(atr1.title)\n    db.session.delete(atr1)\n    db.session.commit()\n    return 'delete'\n\n\nif __name__ == '__main__':\n    app.run()", "sub_path": "db_demo1.py", "file_name": "db_demo1.py", "file_ext": "py", "file_size_in_byte": 1551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "184633441", "text": "from lazyflow.graph import Operator, InputSlot, OutputSlot\n\nimport numpy\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass OpFilterLabels(Operator):\n    \"\"\"\n    Given a labeled volume, discard labels that have too few pixels.\n    Zero is used as the background label\n    \"\"\"\n    name = \"OpFilterLabels\"\n    category = \"generic\"\n\n    Input = InputSlot() \n    MinLabelSize = InputSlot(stype='int')\n    MaxLabelSize = InputSlot(optional=True, stype='int')\n    BinaryOut = InputSlot(optional=True, value = False, stype='bool')\n    \n    Output = OutputSlot()\n    \n    def setupOutputs(self):\n        self.Output.meta.assignFrom(self.Input.meta)\n    \n    def execute(self, slot, subindex, roi, result):\n        minSize = self.MinLabelSize.value\n        maxSize = None\n        if self.MaxLabelSize.ready():\n            maxSize = self.MaxLabelSize.value\n        req = self.Input.get(roi)\n        req.writeInto(result)\n        req.wait()\n        \n        self.remove_wrongly_sized_connected_components(result, min_size=minSize, max_size=maxSize, in_place=True)\n        return result\n    \n    def propagateDirty(self, inputSlot, subindex, roi):\n        # Both input slots can affect the entire output\n        assert inputSlot == self.Input or inputSlot == self.MinLabelSize or inputSlot == self.MaxLabelSize\n        self.Output.setDirty( slice(None) )\n\n    def remove_wrongly_sized_connected_components(self, a, min_size, max_size, in_place):\n        \"\"\"\n        Adapted from http://github.com/jni/ray/blob/develop/ray/morpho.py\n        (MIT License)\n        \"\"\"\n        bin_out = self.BinaryOut.value\n        \n        original_dtype = a.dtype\n        \n        if not in_place:\n            a = a.copy()\n        if min_size == 0 and (max_size is None or max_size > numpy.prod(a.shape)): # shortcut for efficiency\n            return a\n        \n        try:\n            component_sizes = numpy.bincount( a.ravel() )\n        except TypeError:\n            # On 32-bit systems, must explicitly convert from uint32 to int\n            # (This fix is just for VM testing.)\n            component_sizes = numpy.bincount( numpy.asarray(a.ravel(), dtype=int) )\n        bad_sizes = component_sizes < min_size\n        if max_size is not None:\n            numpy.logical_or( bad_sizes, component_sizes > max_size, out=bad_sizes )\n        \n        bad_locations = bad_sizes[a]\n        
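#indexing the per-label mask with the label image yields a per-pixel mask of\n        #components whose size falls outside [min_size, max_size]; zeroed below\n        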
a[bad_locations] = 0\n if (bin_out):\n a[a>0]=1\n return numpy.array(a, dtype=original_dtype)\n\n", "sub_path": "lazyflow/operators/opFilterLabels.py", "file_name": "opFilterLabels.py", "file_ext": "py", "file_size_in_byte": 2500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "lazyflow.graph.Operator", "line_number": 8, "usage_type": "name"}, {"api_name": "lazyflow.graph.InputSlot", "line_number": 16, "usage_type": "call"}, {"api_name": "lazyflow.graph.InputSlot", "line_number": 17, "usage_type": "call"}, {"api_name": "lazyflow.graph.InputSlot", "line_number": 18, "usage_type": "call"}, {"api_name": "lazyflow.graph.InputSlot", "line_number": 19, "usage_type": "call"}, {"api_name": "lazyflow.graph.OutputSlot", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "240633590", "text": "import asyncio\nimport json\nimport logging\nimport pickle\nimport platform\nimport sys\nimport time\n\nimport aiohttp\nimport discord\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nimport numpy as np\n\n# custom datetime with modulo\nfrom cogs.utils.datetime_modulo import datetime\nfrom datetime import timedelta\nimport config\n\n\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.INFO)\nhandler = logging.FileHandler(\n filename='HatBot.log',\n encoding='utf-8',\n mode='a',\n )\nhandler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\n\nasync def create_http_session(loop):\n \"\"\"Creates an async HTTP session. 
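A hedged smoke test for the db_demo1.py record further up, exercising its routes through Flask's built-in test client. Everything here is an assumption about the surrounding project: the module is importable as db_demo1, the installed Flask-SQLAlchemy version tolerates the module-level create_all() without an app context, and config points at a scratch database.

```python
from db_demo1 import app, Article  # hypothetical import path

client = app.test_client()
assert client.get('/add').data == b'haha'   # /add inserts a row and returns 'haha'
with app.app_context():
    assert Article.query.filter_by(title='aaa').count() >= 1
```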
Required to be from an async function\n by aiohttp 3.5.4\"\"\"\n return aiohttp.ClientSession(loop=loop)\n\n\nclass MyBot(Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # background tasks\n game_period = timedelta(hours=1)\n self.bg_game = self.loop.create_task(self.change_game(game_period))\n\n # Create HTTP session\n self.http_session = self.loop.run_until_complete(\n create_http_session(self.loop))\n\n async def close(self):\n \"\"\"Subclasses the close() method to close the HTTP Session.\"\"\"\n await self.http_session.close()\n await super().close()\n\n async def on_ready(self):\n print('Logged in as ' +\n self.user.name +\n ' (ID:' +\n str(self.user.id) +\n ') | Connected to ' +\n str(len(self.guilds)) +\n ' guilds | Connected to ' +\n str(len(set(self.get_all_members()))) +\n ' users')\n print('--------')\n print('Startup Time: {}'.format(datetime.now()))\n print('--------')\n print(('Current Discord.py Version: {} | ' +\n 'Current Python Version: {}').format(discord.__version__,\n platform.python_version()))\n print('--------')\n print('Use this link to invite {}:'.format(self.user.name))\n inv_link = discord.utils.oauth_url(self.user.id)\n print(inv_link.format(self.user.id))\n print('--------')\n\n async def on_message(self, message):\n\n if message.content.startswith(self.command_prefix):\n # run the command, if it is one\n await self.process_commands(message)\n\n async def on_reaction_add(self, reaction, user):\n message = reaction.message\n if not user.bot and not message.author.bot:\n emoji = reaction.emoji\n r = np.random.randint(10)\n if r == 0:\n await asyncio.sleep(2)\n await message.add_reaction(emoji)\n\n async def change_game(self, period):\n \"\"\"\n Input\n -----\n period : timedelta\n Period of the message.\n \"\"\"\n if not isinstance(period, timedelta):\n raise ValueError('period {:f} is not timedelta'.format(period))\n\n await self.wait_until_ready()\n\n while not self.is_closed():\n with open('games.json', 'r') as f:\n games = json.load(f)['games']\n game_name = np.random.choice(games)\n await self.change_presence(activity=discord.Game(name=game_name))\n await asyncio.sleep(period.total_seconds())\n\n\nif __name__ == '__main__':\n\n if 'win32' in sys.platform:\n asyncio.set_event_loop(asyncio.ProactorEventLoop())\n\n loop = asyncio.get_event_loop()\n\n bot = MyBot(\n description='HatBot by Snaptraks#2606',\n command_prefix='!',\n help_command=commands.DefaultHelpCommand(dm_help=True),\n loop=loop,\n )\n\n # This specifies what extensions to load when the bot starts up\n startup_extensions = [\n 'cogs.Admin',\n 'cogs.Dev',\n 'cogs.Feesh',\n 'cogs.Fun',\n 'cogs.Git',\n 'cogs.Info',\n 'cogs.Levels',\n 'cogs.Minigames',\n 'cogs.Moderation',\n 'cogs.Poll',\n 'cogs.Responses',\n 'cogs.Roles',\n ]\n\n for extension in startup_extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n exc = '{}: {}'.format(type(e).__name__, e)\n print('Failed to load extension {}\\n{}'.format(extension, exc))\n\n loop.run_until_complete(bot.start(config.hatbot_token))\n", "sub_path": "HatBot.py", "file_name": "HatBot.py", "file_ext": "py", "file_size_in_byte": 4441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 23, "usage_type": "call"}, {"api_name": 
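change_game in the HatBot.py record above is the standard asyncio periodic-task pattern; stripped of the Discord specifics it reduces to the following runnable sketch (print stands in for change_presence).

```python
import asyncio
from datetime import timedelta

async def rotate_status(period: timedelta):
    while True:
        print('rotating status...')          # stand-in for change_presence()
        await asyncio.sleep(period.total_seconds())

async def main():
    task = asyncio.create_task(rotate_status(timedelta(seconds=1)))
    await asyncio.sleep(3)                   # let it tick a few times
    task.cancel()

asyncio.run(main())
```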
"logging.Formatter", "line_number": 28, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "cogs.utils.datetime_modulo.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "cogs.utils.datetime_modulo.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "discord.__version__", "line_number": 70, "usage_type": "attribute"}, {"api_name": "platform.python_version", "line_number": 71, "usage_type": "call"}, {"api_name": "discord.utils.oauth_url", "line_number": 74, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 100, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 109, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 115, "usage_type": "attribute"}, {"api_name": "asyncio.set_event_loop", "line_number": 116, "usage_type": "call"}, {"api_name": "asyncio.ProactorEventLoop", "line_number": 116, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 118, "usage_type": "call"}, {"api_name": "discord.ext.commands.DefaultHelpCommand", "line_number": 123, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 123, "usage_type": "name"}, {"api_name": "config.hatbot_token", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "554401355", "text": "import time,datetime\nimport numpy as np\nimport cv2\nimport pickle as pk\nfrom Obj.Obs_Graph import Obs_Graph\nfrom tools.prepare_data import prepare_data\nfrom config import Config\nfrom image2video import img2mp4\nfrom mot_func.mot_pre_association import mot_pre_association\nfrom mot_func.mot_window_pre_association import mot_window_pre_association\nfrom mot_func.MOT_Initialization_Tracklets import MOT_Initialization_Tracklets\nfrom mot_func.MOT_Window_Initialization_Tracklets import MOT_Window_Initialization_Tracklets\nfrom mot_func.MOT_Window_Association import MOT_Window_Association\nfrom mot_func.MOT_Global_Association import MOT_Global_Association\nfrom mot_func.MOT_Confidence_Update import MOT_Confidence_Update\nfrom mot_func.MOT_Type_Update import MOT_Type_Update\nfrom mot_func.MOT_State_Update import MOT_State_Update\nfrom mot_func.MOT_Generation_Tracklets import MOT_Generation_Tracklets\nfrom mot_func.MOT_Tracking_Results import MOT_Tracking_Results\nfrom mot_func.mot_count_ids import mot_count_ids\nfrom mot_func.MOT_Init_Tracklets_Generation import MOT_Init_Tracklets_Generation\nfrom mot_func.mot_appearance_model_generation import mot_appearance_model_generation\n\nfrom tools.MOT_Draw_Tracking import MOT_Tracking_Reauslt_Realtime\nfrom tools.fileGiant import clear_subfile\nfrom tools.ListGiant import ListInsert\n\nfrom Obj.Obs_Graph import Kalman_Filter\n\nprint(\"config param 
generated...")\nparam = Config()\nprint('Loading detections...')\nprepare_data(param)\ndetections = param.detections\nimg_List = param.img_List\n# 1:ILDA, 0: No-ILDA (faster)\n# To use ILDA, refer to README.\nparam.use_ILDA = 0\nframe_start = 0\nif len(param.img_List) > 10:\n    frame_end = len(detections)\nelse:\n    frame_end = 10\n\nAll_Eval = []\ncct = 0\n#this variable is used to record every state of tracklets in every frame and finally use Trk_sets to draw and show the tracklets\nTrk_sets = []\nall_mot = []\n#to record relations between detections in current and previous frames\n#Obs_grap = []\ninit_img_set = np.zeros((param.imgSeq_lenth,param.imgsize[0],param.imgsize[1],param.imgsize[2]))\na_model_list = []\n#window information\nObs_grap_window = []\nTracklets_window = []\n## Initialization Tracklet\nstart_time = time.time()\ninit_frame = frame_start + param.show_scan + 1\n#window tracklet generated\nwindow_start_frame = param.tracking_start_frame\nwindow_end_frame = window_start_frame+param.window_length-1\nfor fr in range(0,window_end_frame+1):\n    filename = param.img_path + img_List[fr]\n    bgrimg = cv2.imread(filename)\n    b,g,r = cv2.split(bgrimg)\n    rgbimg = cv2.merge([r,g,b])\n    init_img_set[fr] = rgbimg\n    frame_a_models = []\n    if fr > window_start_frame-1:\n        for i in range(0,len(detections[fr])):\n            det = detections[fr][i]\n            det_a_model = mot_appearance_model_generation(rgbimg,param,det[2:-1])\n            frame_a_models.append(det_a_model)\n    #a_model_list.append(frame_a_models)\n    ListInsert(a_model_list,fr,frame_a_models,[])\nprint(window_start_frame,window_end_frame)\nfor i in range(0,window_end_frame+1):\n    num_det = len(detections[i])\n    obs_grap = Obs_Graph(num_det)\n    Obs_grap_window.append(obs_grap)\n    Obs_grap_window[i].iso_idx = np.array(np.arange(0,len(detections[i])))\n    Obs_grap_window[i].kalman_filter = [Kalman_Filter()]*len(detections[i])\n    Obs_grap_window[i].child = [-1]*num_det\n    Obs_grap_window[i].iso_child = []\nnum_det = len(detections[0])\nObs_grap_window[0].child = [-1]*num_det\n\nprint(\"Tracklets_window number is:\",len(Tracklets_window))\n#clear ./result subfiles\nclear_subfile(\"./result/\")\n#define current frame and window_frame\n## Tracking\nfor fr in range(window_end_frame,frame_end):\n    print('Tracking:Frame_{}'.format(fr))\n    filename = param.img_path+param.img_List[fr]\n    bgrimg = cv2.imread(filename)\n    b,g,r = cv2.split(bgrimg)\n    rgbimg = cv2.merge([r,g,b])\n    init_img_set[fr] = rgbimg\n    frame_a_models = []\n    for i in range(0,len(detections[fr])):\n        det = detections[fr][i]\n        det_a_model = mot_appearance_model_generation(rgbimg,param,det[2:-1])\n        frame_a_models.append(det_a_model)\n    ListInsert(a_model_list,fr,frame_a_models,[])\n    #a_model_list.append(frame_a_models)\n    \n    if fr+1 > len(Obs_grap_window):\n        num_det = len(detections[fr])\n        obs_grap = Obs_Graph(num_det)\n        obs_grap.child = [-1]*num_det \n        Obs_grap_window.append(obs_grap)\n        Obs_grap_window[fr].iso_idx = np.array(np.arange(0,num_det))\n        #Obs_grap_window[fr].child = [-1]*len(num_det)\n    \"\"\"***************Window Move*******************\"\"\"\n    #generate new tracklet in the window\n    Tracklets_window,param,Obs_grap_window = MOT_Generation_Tracklets(a_model_list,init_img_set,Tracklets_window,detections,param,Obs_grap_window,fr)\n    Tracklets_window = MOT_State_Update(Tracklets_window,param,fr)\n    Tracklets_window = MOT_Type_Update(rgbimg,Tracklets_window,param,fr)\n    #Tracklets_window = MOT_Confidence_Update(Tracklets_window,param,fr)\n    \"\"\"^^^^^^^^^^^^^^^Window Tracklets 
Association^^^^^^^^^^^^^^^^^^^\"\"\"\n #after tracklet generated in the window,we start to do association between tracklet before and tracklet in window\n Tracklets_window,Obs_grap_window = MOT_Window_Association(Tracklets_window,param,fr,detections,Obs_grap_window)\n \"\"\"**********************************\"\"\"\n if param.use_ILDA:\n ILDA = MOT_Online_Appearance_Learning(rgbimg,img_path,img_List,fr,Trk,param,ILDA)\n ## Tracking Results\n Trk_sets = MOT_Tracking_Results(Tracklets_window,Trk_sets,fr-param.window_length+1,param)\n if param.draw_while_track:\n MOT_Tracking_Reauslt_Realtime(Trk_sets,fr-param.window_length+1,param)\n\n##count the ids of the rest Tracklet\nfor tracklet in Tracklets_window:\n mot_count_ids(tracklet,param)\nprint(\"Tracking Done...\")\nend_time = time.time()\nspend_time = end_time-start_time\nprint(\"Tracking: Total Time:{}|FPS :{}\".format(spend_time,len(param.img_List)/spend_time))\nprint(\"Total Tracklet:{} |Total Object:{}\".format(param.total_tracklet_count,param.object_count))\nprint(\"TT-TO: {}\".format(param.total_tracklet_count-param.object_count))\nprint(\"IDS:{}\".format(param.ids))\n\nprint(\"mp4 result is generating.....\")\nimg2mp4(param)\nprint(\"mp4 result is completed!\")\nif param.draw_while_track == False:\n pass\n", "sub_path": "tracking_demo.py", "file_name": "tracking_demo.py", "file_ext": "py", "file_size_in_byte": 6337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "config.Config", "line_number": 31, "usage_type": "call"}, {"api_name": "tools.prepare_data.prepare_data", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 67, "usage_type": "call"}, {"api_name": "mot_func.mot_appearance_model_generation.mot_appearance_model_generation", "line_number": 73, "usage_type": "call"}, {"api_name": "tools.ListGiant.ListInsert", "line_number": 76, "usage_type": "call"}, {"api_name": "Obj.Obs_Graph.Obs_Graph", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "Obj.Obs_Graph.Kalman_Filter", "line_number": 83, "usage_type": "call"}, {"api_name": "tools.fileGiant.clear_subfile", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 101, "usage_type": "call"}, {"api_name": "mot_func.mot_appearance_model_generation.mot_appearance_model_generation", "line_number": 106, "usage_type": "call"}, {"api_name": "tools.ListGiant.ListInsert", "line_number": 108, "usage_type": "call"}, {"api_name": "Obj.Obs_Graph.Obs_Graph", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 116, "usage_type": "call"}, {"api_name": "mot_func.MOT_Generation_Tracklets.MOT_Generation_Tracklets", "line_number": 120, "usage_type": "call"}, {"api_name": "mot_func.MOT_State_Update.MOT_State_Update", "line_number": 121, "usage_type": "call"}, {"api_name": "mot_func.MOT_Type_Update.MOT_Type_Update", 
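tracking_demo.py above repeatedly calls ListInsert(a_model_list, fr, frame_a_models, []) from tools.ListGiant. The real helper is not shown anywhere in this file; judging from the call sites, a plausible pure-Python equivalent pads the list with a filler so indexed assignment at an arbitrary frame index never raises.

```python
def list_insert(lst, idx, value, filler):
    """Pad lst with filler until idx is a valid position, then assign."""
    while len(lst) <= idx:
        lst.append(filler)
    lst[idx] = value

frames = []
list_insert(frames, 3, ['appearance-model'], [])
print(frames)  # [[], [], [], ['appearance-model']]
```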
"line_number": 122, "usage_type": "call"}, {"api_name": "mot_func.MOT_Window_Association.MOT_Window_Association", "line_number": 126, "usage_type": "call"}, {"api_name": "mot_func.MOT_Tracking_Results.MOT_Tracking_Results", "line_number": 131, "usage_type": "call"}, {"api_name": "tools.MOT_Draw_Tracking.MOT_Tracking_Reauslt_Realtime", "line_number": 133, "usage_type": "call"}, {"api_name": "mot_func.mot_count_ids.mot_count_ids", "line_number": 137, "usage_type": "call"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "image2video.img2mp4", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "506018155", "text": "from django.http import HttpResponse\r\nfrom django.http import Http404\r\nfrom django.apps import AppConfig\r\n\r\nfrom django.shortcuts import get_object_or_404,render,redirect\r\nfrom django.contrib.auth import authenticate,login,logout\r\nfrom django.views.generic import View\r\nfrom django.views import generic\r\nfrom django.core.urlresolvers import reverse_lazy\r\nfrom .forms import UserForm\r\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\r\nfrom .models import Category\r\nfrom .models import Products\r\nfrom .models import Cart,ProductOrder\r\nfrom django.contrib.auth.models import User\r\nfrom django.http import HttpResponseRedirect\r\nimport operator\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\n\r\nfrom django.db.models import Q\r\n\r\ndef index ( request ):\r\n all_category=Category.objects.all()\r\n p=Products.objects.all()\r\n query=request.GET.get(\"q\")\r\n if query:\r\n query_list = query.split()\r\n result = result.filter(\r\n reduce(operator.and_,\r\n (Q(product_name=q) for q in query_list)) |\r\n reduce(operator.and_,\r\n (Q(content_brand=q) for q in query_list))\r\n )\r\n context={'all_category' : all_category,'user':request.user,'p':p,}\r\n return render(request, 'products/index.html', context)\r\n\r\n\r\n\r\ndef detail(request , category_id):\r\n category=get_object_or_404(Category,pk=category_id)\r\n\r\n c=Category.objects.all()\r\n product=Products.objects.select_related().filter(product_category = category_id)\r\n paginator=Paginator(product,6)\r\n page=request.GET.get('page')\r\n try:\r\n product=paginator.page(page)\r\n except PageNotAnInteger:\r\n product=paginator.page(1)\r\n except EmptyPage:\r\n product=paginator.page(paginator.num_pages)\r\n\r\n return render(request, 'products/detail.html', {'category': category, 'product':product,'c':c,'user':request.user,})\r\n\r\n\r\n\r\ndef product_detail(request,category_id ,product_id):\r\n c=get_object_or_404(Category,pk= category_id)\r\n all_category=Category.objects.all()\r\n p=get_object_or_404(Products,pk=product_id)\r\n products=Products.objects.all()\r\n page_title=p.product_name\r\n meta_description=p.product_description\r\n if request.method =='POST':\r\n postdata=request.POST.copy()\r\n form=ProductAddToCartForm(request,postdata)\r\n if form.is_valid():\r\n cart.add_to_cart(request)\r\n if request.session.test_cookie_worked():\r\n request.session.delete_test_cookie()\r\n url = urlresolvers.reverse('show_cart')\r\n return HttpResponseRedirect(url)\r\n else:\r\n form=ProductAddToCartForm(request=request, label_suffix=':')\r\n form.fields['product_id'].widget.attrs['value']=product_id\r\n request.session.set_test_cookie()\r\n return render(request , 'products/product_detail.html',{'p':p,'all_category':all_category,'user':request.user,'c':c,'products':products})\r\n\r\ndef search(request):\r\n query = 
request.GET.get(\"q\")\r\n p=Products.objects.filter(product_name=query)\r\n\r\n return render(request,'products/search.html',{'p':p})\r\n\r\n\r\nclass UserFormView(View):\r\n form_class=UserForm\r\n template_name='products/registration_form.html'\r\n all_category=Category.objects.all()\r\n def get(self,request):\r\n form = self.form_class(None)\r\n return render(request,self.template_name,{'form':form})\r\n def post(self,request):\r\n form=self.form_class(request.POST)\r\n if form.is_valid():\r\n user =form.save(commit=False)\r\n username=form.cleaned_data['username']\r\n password=form.cleaned_data['password']\r\n user.set_password(password)\r\n user.save()\r\n user=authenticate(username=username,password=password)\r\n\r\n if user is not None:\r\n if user.is_active:\r\n login(request,user)\r\n return redirect('products:index')\r\n\r\n\r\n return render(request,self.template_name,{'form':form,'all_category':all_category,'user':request.user})\r\n\r\ndef add_to_cart(request,product_id):\r\n if request.user.is_authenticated():\r\n try:\r\n product=Products.objects.get(pk=product_id)\r\n except ObjectDoesNotExist:\r\n pass\r\n else:\r\n try:\r\n cart=Cart.objects.get(user=request.user,active=True)\r\n except ObjectDoesNotExist:\r\n cart=Cart.objects.create(user=request.user)\r\n cart.add_to_cart(product_id)\r\n return redirect('products:cart')\r\n else:\r\n return redirect('products:index')\r\n\r\ndef remove_from_cart(request,product_id):\r\n if request.user.is_authenticated():\r\n try:\r\n product=Products.objects.get(pk=product_id)\r\n except ObjectDoesNotExist:\r\n pass\r\n else:\r\n cart=Cart.objects.get(user=request.user,active=True)\r\n cart.remove_from_cart(product_id)\r\n return redirect('products:cart')\r\n else:\r\n return redirect('products:index')\r\n\r\ndef cart(request):\r\n if request.user.is_authenticated():\r\n cart=Cart.objects.filter(user=request.user.id,active=True)\r\n orders = ProductOrder.objects.filter(cart=cart)\r\n total=0\r\n count=0\r\n for order in orders:\r\n total += (order.product.product_price *order.quantity)\r\n count += order.quantity\r\n context = {'cart':cart,'total':total,'count':count,'orders':orders}\r\n return render(request , 'products/cart.html', context)\r\n else:\r\n return redirect('products:index')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "products/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "models.Category.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Products.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 24, "usage_type": "name"}, {"api_name": "operator.and_", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 30, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models.Q", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 40, "usage_type": "call"}, {"api_name": 
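index() in the views.py record above folds per-term Q objects together with functools.reduce and operator.and_ (which must be imported explicitly in Python 3). The same fold in isolation, on plain booleans:

```python
import operator
from functools import reduce

# one boolean per search term: did this term match?
term_matches = [True, True, False]
print(reduce(operator.and_, term_matches))  # False -- every term must match
```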
"models.Category", "line_number": 40, "usage_type": "argument"}, {"api_name": "models.Category.objects.all", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 42, "usage_type": "name"}, {"api_name": "models.Products.objects.select_related", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 43, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 44, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 48, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 58, "usage_type": "argument"}, {"api_name": "models.Category.objects.all", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Products", "line_number": 60, "usage_type": "argument"}, {"api_name": "models.Products.objects.all", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 61, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Products.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 86, "usage_type": "name"}, {"api_name": "forms.UserForm", "line_number": 87, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 89, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "models.Products.objects.get", "line_number": 114, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 114, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 119, "usage_type": "call"}, {"api_name": 
"models.Cart.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 119, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 120, "usage_type": "name"}, {"api_name": "models.Cart.objects.create", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 121, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 123, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Products.objects.get", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 130, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 131, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 134, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 136, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Cart.objects.filter", "line_number": 142, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 142, "usage_type": "name"}, {"api_name": "models.ProductOrder.objects.filter", "line_number": 143, "usage_type": "call"}, {"api_name": "models.ProductOrder.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.ProductOrder", "line_number": 143, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "566430345", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Pooyan Safari\n\"\"\"\n\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport sys\nimport unicodedata\nimport string\nimport re\nimport random\nimport argparse\nimport time\nimport math\nimport pickle\nimport pdb\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support, confusion_matrix, accuracy_score\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom DataLoader import DataLoader\nimport Constants\nfrom encoders import *\nfrom classifiers import *\nfrom masked_cross_entropy import *\n\nplt.switch_backend('agg')\n\ndef trainBatch(src_batch, src_lengths, src_lengths_sorted_idx,tgt_batch, tgt_lengths,tgt_lengths_sorted_idx ,labels,mask_src, mask_tgt, encoder_src, encoder_tgt, classifier,encoder_src_optimizer, encoder_tgt_optimizer, classifier_optimizer,criterion,opt):\n ''' operation on each mini-batch in training phase ''' \n # Turn padded array tgt_batch(batch_size,tgt_len) tensors, into target_var(tgt_len,batch_size) \n if opt.cuda:\n input_var = Variable(torch.cuda.LongTensor(src_batch)).transpose(0, 1)\n target_var = Variable(torch.cuda.LongTensor(tgt_batch)).transpose(0, 1)\n label_var = Variable(torch.cuda.LongTensor(labels))\n else:\n 
input_var = Variable(torch.LongTensor(src_batch)).transpose(0, 1)\n target_var = Variable(torch.LongTensor(tgt_batch)).transpose(0, 1)\n # remove the .cuda here to run on CPU\n label_var = Variable(torch.LongTensor(labels))\n\n # Zero gradients of both optimizers\n encoder_src_optimizer.zero_grad()\n encoder_tgt_optimizer.zero_grad()\n classifier_optimizer.zero_grad()\n loss = 0 \n # Run words through encoder\n encoder_src_outputs, encoder_src_hidden = encoder_src(input_var, src_lengths) #encoder_src_outputs(src_len+,batch,hidden_size) encoder_src_hidden(num_directions,batch,hidden_size) decoder_hidden(1,batch,hidden_size)\n encoder_tgt_outputs, encoder_tgt_hidden = encoder_tgt(target_var, tgt_lengths) #encoder_tgt_outputs(src_len+,batch,hidden_size) encoder_tgt_hidden(num_directions,batch,hidden_size) decoder_hidden(1,batch,hidden_size)\n #apply averaging here\n encoder_src_outputs = encoder_src_outputs.sum(0,keepdim=True) / src_lengths.unsqueeze(1).expand(1,encoder_src_outputs.size(1),encoder_src_outputs.size(2)).float() #encoder_src_outputs(1,batch,hidden_size)\n encoder_tgt_outputs = encoder_tgt_outputs.sum(0,keepdim=True) / tgt_lengths.unsqueeze(1).expand(1,encoder_tgt_outputs.size(1),encoder_tgt_outputs.size(2)).float() #encoder_tgt_outputs(1,batch,hidden_size)\n #reorder to original \n encoder_src_outputs = encoder_src_outputs[:,src_lengths_sorted_idx.sort()[1],:].permute(1,0,2).squeeze(1) #encoder_src_outputs(batch,hidden_size)\n encoder_tgt_outputs = encoder_tgt_outputs[:,tgt_lengths_sorted_idx.sort()[1],:].permute(1,0,2).squeeze(1) #encoder_tgt_outputs(batch,hidden_size)\n \n #classifier\n pred = classifier(encoder_src_outputs.clone(),encoder_tgt_outputs.clone())\n #Loss calculation and backpropagation\n #label_var_onehot = torch.zeros(pred.size(0),pred.size(1)).cuda() #debug\n #for i in range(2): #debug\n # if i==0:\n # label_var_onehot[label_var==0,i]=1 #debug\n # else:\n # label_var_onehot[label_var==1,i]=1 #debug\n #loss = criterion(pred.clone(),label_var_onehot.clone())\n loss = criterion(pred.clone(),label_var)\n \n loss.backward()\n\n # clip_grad_norm for gradient clipping\n if opt.clip!=0:\n torch.nn.utils.clip_grad_norm(encoder_src.parameters(), opt.clip) #turn off for debugging\n torch.nn.utils.clip_grad_norm(encoder_tgt.parameters(), opt.clip) #turn off for debugging\n \n # Update parameters with optimizers\n encoder_src_optimizer.step()\n encoder_tgt_optimizer.step()\n classifier_optimizer.step()\n return loss\n\ndef trainEpoch(training_data,encoder_src, encoder_tgt,classifier,encoder_src_optimizer,encoder_tgt_optimizer,classifier_optimizer,criterion,opt):\n ''' Epoch operation in training phase '''\n encoder_src.train()\n encoder_tgt.train()\n classifier.train()\n total_loss = 0\n batch_num = 0\n for batch in training_data:\n src, tgt,labels = batch\n src_no_BOS_EOS = src[0][:,1:-1]\n tgt_no_BOS_EOS = tgt[0][:,1:-1]\n src_no_BOS_EOS[src[0][:,1:-1]==Constants.EOS] = Constants.PAD\n tgt_no_BOS_EOS[tgt[0][:,1:-1]==Constants.EOS] = Constants.PAD\n src_lengths = src_no_BOS_EOS.ne(Constants.PAD).sum(1)\n tgt_lengths = tgt_no_BOS_EOS.ne(Constants.PAD).sum(1)\n src_lengths_sorted,src_lengths_sorted_idx = src_lengths.sort(descending=True)\n tgt_lengths_sorted,tgt_lengths_sorted_idx = tgt_lengths.sort(descending=True)\n src_sorted = src_no_BOS_EOS[src_lengths_sorted_idx,:] #not include on the source side\n tgt_sorted = tgt_no_BOS_EOS[tgt_lengths_sorted_idx,:]\n #tgt2src_lengths_sorted = tgt_lengths[src_lengths_sorted_idx]\n #sequence_mask function automatically moves the 
output to cuda if sequence_length is on cuda \n mask_src = sequence_mask(sequence_length=src_lengths_sorted).float() # mask_src(batch, src_len+) \n mask_tgt = sequence_mask(sequence_length=tgt_lengths_sorted).float() # mask_tgt(batch, tgt_len+)\n loss = trainBatch(src_batch=src_sorted, #src_batch(batch,src_len+)\n src_lengths = src_lengths_sorted, #src_lengths(batch,) this length counts src_len+\n src_lengths_sorted_idx = src_lengths_sorted_idx,\n tgt_batch=tgt_sorted, #tgt_batch(batch,+tgt_len+) this includs both and but only one is used for decoding (target size is one more than target length)\n tgt_lengths = tgt_lengths_sorted, #tgt_lengths_sorted(batch,) this length counts tgt_len+\n tgt_lengths_sorted_idx = tgt_lengths_sorted_idx,\n labels = labels,\n mask_src = mask_src, # mask_src(batch, src_len+)\n mask_tgt = mask_tgt, # mask_tgt(batch, tgt_len+)\n encoder_src=encoder_src,\n encoder_tgt=encoder_tgt,\n classifier = classifier,\n encoder_src_optimizer=encoder_src_optimizer,\n encoder_tgt_optimizer=encoder_tgt_optimizer,\n classifier_optimizer = classifier_optimizer,\n criterion=criterion,\n opt=opt)\n \n batch_num += 1\n total_loss += loss.item()\n \n return total_loss / batch_num\n\ndef main():\n ''' Main function '''\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-data', required=True)\n parser.add_argument('-save_path', required=True)\n parser.add_argument('-save_every', type=int,default=5,help='save the model after this amount of epoch')\n parser.add_argument('-validate_every', type=int, default=1,help='compute loss, perplexity and accuracy on the validation data')\n parser.add_argument('-sample_every', type=int, default=5,help='translate a sample batch of validation data')\n parser.add_argument('-epoch', type=int, default=10)\n parser.add_argument('-learning_rate', type=float, default=.0001)\n parser.add_argument('-batch_size', type=int, default=64)\n parser.add_argument('-embedding_size', type=int, default=512)\n parser.add_argument('-pretrained_embedding_src', default=None,help='use a pretrained embedding for source')\n parser.add_argument('-pretrained_embedding_tgt', default=None,help='use a pretrained embedding for target')\n parser.add_argument('-maxlen', type=int,default=50)\n parser.add_argument('-hidden_size', type=int, default=100)\n parser.add_argument('-n_layers', type=int, default=1)\n parser.add_argument('-teacher_forcing_ratio', type=float, default=0.5)\n parser.add_argument('-dropout', type=float, default=0.1)\n parser.add_argument('-clip', type=float, default=0.0,help='gradient clipping helps prevent the exploding gradient problem') \n parser.add_argument('-log', default=None)\n parser.add_argument('-weight_decay', type=float, default=0,help='L2 regularization')\n parser.add_argument('-attn_model', type=str,choices=['dot','general','concat','variational'],default='dot')\n parser.add_argument('-cuda', action='store_true',help='use cuda gpu, by default it is set to false.')\n parser.add_argument('-eps_sample_train', type=int, default=1,help='number of random samples for variational attention in the training phase')\n parser.add_argument('-eps_sample_eval', type=int, default=1,help='number of random samples for variational attention in the test phase')\n parser.add_argument('-var_hidden_size', type=int, default=1,help='hidden_size of variational network')\n\n opt = parser.parse_args()\n if torch.cuda.is_available() and opt.cuda:\n opt.cuda = True\n else:\n opt.cuda = False\n #========= Loading Dataset =========#\n data = torch.load(opt.data)\n 
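trainBatch above restores the original batch order with src_lengths_sorted_idx.sort()[1]; the argsort-of-argsort identity behind that trick, as a small self-contained check:

```python
import torch

lengths = torch.tensor([3, 7, 2, 5])
sorted_len, idx = lengths.sort(descending=True)  # sorted_len == lengths[idx]
restore = idx.sort()[1]                          # inverse permutation of idx
assert torch.equal(sorted_len[restore], lengths)
```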
opt.max_token_seq_len = data['settings'].max_token_seq_len # from serialized data\n\n #========= Preparing DataLoader =========#\n training_data = DataLoader(data['dict']['src'], # src_word2idx\n data['dict']['tgt'],\n src_insts=data['train']['src'], # word instances transformed into sequences of word index\n tgt_insts=data['train']['tgt'],\n labels=data['train']['labels'],\n batch_size=opt.batch_size,\n cuda=opt.cuda)\n\n validation_data = DataLoader(data['dict']['src'],\n data['dict']['tgt'],\n src_insts=data['valid']['src'],\n tgt_insts=data['valid']['tgt'],\n labels=data['valid']['labels'],\n batch_size=opt.batch_size,\n shuffle=False,\n test=True,\n cuda=opt.cuda)\n\n opt.src_vocab_size = training_data.src_vocab_size\n opt.tgt_vocab_size = training_data.tgt_vocab_size\n\n # all the (hyper-)parameters\n print(opt)\n\n #========= Preparing Model =========#\n # Initialize models (from encoders.py)\n encoder_src = EncoderRNN(opt.src_vocab_size,\n opt.embedding_size, \n opt.hidden_size,\n opt,\n pretrained_embedding=opt.pretrained_embedding_src)\n encoder_tgt = EncoderRNN(opt.tgt_vocab_size,\n opt.embedding_size,\n opt.hidden_size,\n opt,\n pretrained_embedding=opt.pretrained_embedding_tgt)\n\n #==== feed-forward classifier=====\n classifier = classifierMLP(4*opt.hidden_size,opt.hidden_size,2,dropout=opt.dropout)\n \n # Initialize optimizers and criterion\n #TODO:add learning rate picking strategies\n #optimizer = ScheduledOptim(optim.Adam(filter(lambda x: x.requires_grad, transformer.parameters()),betas=(0.9, 0.98), eps=1e-09),opt.d_model, opt.n_warmup_steps)\n encoder_src_optimizer = optim.Adam(encoder_src.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay)\n encoder_tgt_optimizer = optim.Adam(encoder_tgt.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay)\n classifier_optimizer = optim.Adam(classifier.parameters(), lr=opt.learning_rate, weight_decay=opt.weight_decay)\n \n criterion = nn.CrossEntropyLoss()\n #criterion = nn.MSELoss()\n # Move models to GPU\n if opt.cuda:\n encoder_src.cuda()\n encoder_tgt.cuda()\n classifier.cuda()\n \n best_model_acc = 0\n\n for epoch in range(opt.epoch):\n start_time = time.time()\n print('[Start training Epoch', epoch, ']')\n # average loss per epoch\n epoch_loss_avg = trainEpoch(training_data,encoder_src,encoder_tgt,classifier,encoder_src_optimizer,encoder_tgt_optimizer,classifier_optimizer,criterion,opt)\n\n print('Epoch: %s avg-training loss: %8.2f elapse-time: %3.3f' % (epoch,epoch_loss_avg,(time.time()-start_time)/60))\n\n checkpoint = {\n 'encoder_src': encoder_src.state_dict(), #save state information of encoder \n 'encoder_tgt': encoder_tgt.state_dict(), #save state information of decoder\n 'classifier': classifier.state_dict(), #save state information of decoder\n 'settings': opt,\n 'epoch': epoch}\n\n #save model \n if epoch % opt.save_every == 0:\n model_name = opt.save_path + '_epoch_{epoch:d}.chkpt'.format(epoch=epoch)\n torch.save(checkpoint, model_name)\n\n #validation\n if epoch % opt.validate_every==0:\n with torch.no_grad():\n encoder_src_eval = EncoderRNN(opt.src_vocab_size,\n opt.embedding_size,\n opt.hidden_size,\n opt,\n pretrained_embedding=opt.pretrained_embedding_src)\n encoder_tgt_eval = EncoderRNN(opt.tgt_vocab_size,\n opt.embedding_size,\n opt.hidden_size,\n opt,\n pretrained_embedding=opt.pretrained_embedding_tgt)\n classifier_eval = classifierMLP(4*opt.hidden_size,opt.hidden_size,2) # without dropout\n\n encoder_src_eval.load_state_dict(checkpoint['encoder_src'])\n 
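The encoder outputs in trainBatch are summed over the time axis and divided by each sequence's true length; padded steps are assumed to contribute zeros, which pack_padded_sequence-style encoders guarantee. The same length-aware mean pooling in isolation, with an explicit mask on toy shapes:

```python
import torch

outputs = torch.randn(6, 3, 8)                               # (max_len, batch, hidden)
lengths = torch.tensor([6, 4, 2])
mask = torch.arange(6).unsqueeze(1) < lengths.unsqueeze(0)   # (max_len, batch)
outputs = outputs * mask.unsqueeze(2)                        # zero out padded steps
pooled = outputs.sum(0) / lengths.unsqueeze(1).float()       # (batch, hidden)
print(pooled.shape)                                          # torch.Size([3, 8])
```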
encoder_tgt_eval.load_state_dict(checkpoint['encoder_tgt'])\n classifier_eval.load_state_dict(checkpoint['classifier'])\n\n if opt.cuda:\n encoder_src_eval.cuda()\n encoder_tgt_eval.cuda()\n classifier_eval.cuda()\n\n encoder_src_eval.eval() \n encoder_tgt_eval.eval()\n classifier_eval.eval()\n\n total_validate_loss = 0\n total_validate_correct = 0\n validate_batch_num = 0\n total_validation_predicted_words = 0\n all_pred_list = []\n all_labels_list = []\n validate_start = time.time()\n\n for batch in validation_data:\n src, tgt, labels = batch\n src_no_BOS_EOS = src[0][:,1:-1]\n tgt_no_BOS_EOS = tgt[0][:,1:-1]\n src_no_BOS_EOS[src[0][:,1:-1]==Constants.EOS] = Constants.PAD\n tgt_no_BOS_EOS[tgt[0][:,1:-1]==Constants.EOS] = Constants.PAD\n src_lengths = src_no_BOS_EOS.ne(Constants.PAD).sum(1)\n tgt_lengths = tgt_no_BOS_EOS.ne(Constants.PAD).sum(1)\n src_lengths_sorted,src_lengths_sorted_idx = src_lengths.sort(descending=True)\n tgt_lengths_sorted,tgt_lengths_sorted_idx = tgt_lengths.sort(descending=True)\n src_sorted = src_no_BOS_EOS[src_lengths_sorted_idx,:] #not include on the source side\n tgt_sorted = tgt_no_BOS_EOS[tgt_lengths_sorted_idx,:]\n #sequence_mask function automatically moves the output to cuda if sequence_length is on cuda \n mask_src = sequence_mask(sequence_length=src_lengths_sorted).float() # mask_src(batch, src_len+) \n mask_tgt = sequence_mask(sequence_length=tgt_lengths_sorted).float() # mask_tgt(batch, tgt_len+)\n enc_src_outputs, encoder_src_hidden = encoder_src_eval(src_sorted.transpose(0, 1),src_lengths_sorted) #\n enc_tgt_outputs, encoder_tgt_hidden = encoder_tgt_eval(tgt_sorted.transpose(0, 1),tgt_lengths_sorted) #\n #apply averaging here\n enc_src_outputs = enc_src_outputs.sum(0,keepdim=True) / src_lengths_sorted.unsqueeze(1).expand(1,enc_src_outputs.size(1),enc_src_outputs.size(2)).float() #enc_src_outputs(1,batch,hidden_size)\n enc_tgt_outputs = enc_tgt_outputs.sum(0,keepdim=True) / tgt_lengths_sorted.unsqueeze(1).expand(1,enc_tgt_outputs.size(1),enc_tgt_outputs.size(2)).float() #enc_tgt_outputs(1,batch,hidden_size)\n #reorder to original \n enc_src_outputs = enc_src_outputs[:,src_lengths_sorted_idx.sort()[1],:].permute(1,0,2).squeeze(1) #enc_src_outputs(batch,hidden_size)\n enc_tgt_outputs = enc_tgt_outputs[:,tgt_lengths_sorted_idx.sort()[1],:].permute(1,0,2).squeeze(1) #enc_tgt_outputs(batch,hidden_size)\n \n pred = classifier_eval(enc_src_outputs.clone(),enc_tgt_outputs.clone())\n all_pred_list.append(pred.max(1)[1])\n all_labels_list.append(labels)\n #compute loss and accuracy here or after masking\n #label_var_onehot = torch.zeros(pred.size(0),pred.size(1)).cuda() #debug\n #for i in range(2): #debug\n # if i==0:\n # label_var_onehot[labels==0,i]=1 #debug\n # else:\n # label_var_onehot[labels==1,i]=1 #debug\n \n validate_batch_loss = criterion(pred.clone(),labels).item()\n #validate_batch_loss = criterion(pred.clone(),label_var_onehot.clone()).item()\n total_validate_correct += pred.max(1)[1].eq(labels).sum().item()\n total_validation_predicted_words += pred.size(0) \n validate_batch_num += 1 \n total_validate_loss += validate_batch_loss\n \n valid_loss = total_validate_loss / validate_batch_num\n valid_accu = total_validate_correct / total_validation_predicted_words\n all_pred = torch.cat(all_pred_list)\n all_labels = torch.cat(all_labels_list)\n #[0] is precision, [1] is recall, [2] is F measure, [3] is the number of instances in each class not the correctly predicted instances\n\n accuracy = accuracy_score(all_labels,all_pred)\n precision = 
precision_recall_fscore_support(all_labels,all_pred, average=None)[0]\n recall = precision_recall_fscore_support(all_labels,all_pred, average=None)[1]\n samples_per_class = precision_recall_fscore_support(all_labels,all_pred, average=None)[3]\n F1_score = precision_recall_fscore_support(all_labels,all_pred, average=None)[2]\n F1_score_average_micro = precision_recall_fscore_support(all_labels,all_pred, average='micro')[2]\n F1_score_average_macro = precision_recall_fscore_support(all_labels,all_pred, average='macro')[2]\n if best_model_acc < accuracy:\n best_model_acc = accuracy\n model_name = opt.save_path + '_best.chkpt'\n torch.save(checkpoint, model_name)\n print('best model accuracy: ',accuracy)\n all_pred_name = opt.save_path + '_best_all_preds.txt'\n all_labels_name = opt.save_path + '_best_all_labels.txt'\n with open(all_pred_name, 'w') as filehandle:\n for listitem in all_pred:\n filehandle.write('%s\\n' % listitem.item())\n with open(all_labels_name, 'w') as filehandle:\n for listitem in all_labels:\n filehandle.write('%s\\n' % listitem.item())\n #confusion matrix: row true, column prediction\n conf_mat = confusion_matrix(all_labels,all_pred)\n print(' - (Validation) loss: {val_loss: 8.5f}, ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %,''elapse: {elapse:3.3f} min'.format(val_loss=valid_loss,ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu, elapse=(time.time()-validate_start)/60))\n print('accuracy: ',accuracy)\n print('precision: ',precision)\n print('recall: ',recall)\n print('number of instances per class in the ground truth: ',samples_per_class)\n print('F1-score: ',F1_score)\n print('F1-score_average_micro: ',F1_score_average_micro)\n print('F1-score_average_macro: ',F1_score_average_macro)\n print('confusion-matrix:\\n',conf_mat)\n \n \nif __name__ == '__main__':\n main()\n\n\n", "sub_path": "end2end_neural_classifiers/feed-forward/nwa/nwa.py", "file_name": "nwa.py", "file_ext": "py", "file_size_in_byte": 21047, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.switch_backend", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda.LongTensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cuda.LongTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda.LongTensor", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, 
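The validation block above computes sklearn metrics with average=None, which returns one value per class, plus per-class support as the fourth element. On a toy prediction:

```python
from sklearn.metrics import (accuracy_score, confusion_matrix,
                             precision_recall_fscore_support)

y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]
precision, recall, f1, support = precision_recall_fscore_support(
    y_true, y_pred, average=None)
print(accuracy_score(y_true, y_pred))    # 0.6
print(confusion_matrix(y_true, y_pred))  # rows: truth, columns: prediction
```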
"usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Constants.EOS", "line_number": 102, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 102, "usage_type": "attribute"}, {"api_name": "Constants.EOS", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 104, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 105, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 172, "usage_type": "call"}, {"api_name": "DataLoader.DataLoader", "line_number": 176, "usage_type": "call"}, {"api_name": "DataLoader.DataLoader", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 219, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 220, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 221, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 223, "usage_type": "name"}, {"api_name": "time.time", "line_number": 234, "usage_type": "call"}, {"api_name": "time.time", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 255, "usage_type": "call"}, {"api_name": "time.time", "line_number": 287, "usage_type": "call"}, {"api_name": "Constants.EOS", "line_number": 293, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 293, "usage_type": "attribute"}, {"api_name": "Constants.EOS", "line_number": 294, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 294, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 295, "usage_type": "attribute"}, {"api_name": "Constants.PAD", "line_number": 296, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 334, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 337, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 338, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 339, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 340, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 341, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 342, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 343, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 347, "usage_type": "call"}, {"api_name": "io.open", "line_number": 351, "usage_type": "call"}, 
{"api_name": "io.open", "line_number": 354, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 358, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 359, "usage_type": "call"}, {"api_name": "time.time", "line_number": 359, "usage_type": "call"}]} +{"seq_id": "611989557", "text": "from numpy import *\nfrom matplotlib import pyplot as plt\n\n\n#加载数据\ndef loadDataSet(fileName):\n dataMat = []\n fr = open(fileName)\n for line in fr.readlines():\n curLine = line.strip().split('\\t')\n fltLine = map(float, curLine)\n dataMat.append(fltLine)\n return dataMat\n\n#计算两个向量的距离,用的是欧几里得距离\ndef distEclud(vecA, vecB):\n return sqrt(sum(power(vecA - vecB, 2)))\n\n#随机生成初始的质心\ndef randCent(dataSet, k):\n m, dim = dataSet.shape #数据维度为dim(N)\n centroids = zeros((k, dim))\n for i in range(k):\n index = int(random.uniform(0, m))\n centroids[i, :] = dataSet[index, :]\n return centroids\n\n#定义Kmeans函数\ndef kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):\n m, dim = dataSet.shape\n clusterAssment = mat(zeros((m,dim)))\n centroids = createCent(dataSet, k)\n clusterChanged = True\n while clusterChanged:\n clusterChanged = False\n for i in range(m):\n minDist = inf\n minIndex = -1\n for j in range(k):\n distJI = distMeas(centroids[j,:],dataSet[i,:])\n if distJI < minDist:\n minDist = distJI; minIndex = j\n if clusterAssment[i,0] != minIndex:\n clusterChanged = True\n clusterAssment[i,:] = minIndex,minDist**2\n print (centroids)\n for cent in range(k):\n ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]\n centroids[cent,:] = mean(ptsInClust, axis=0)\n return centroids, clusterAssment\n\n#将聚类结果显示\ndef show(dataSet, k, centroids, clusterAssment):\n m, dim = dataSet.shape\n if dim != 2:\n print ('Sorry! I can not draw because the dimension of your data is not 2!') #不能显示高于2维的聚类结果\n return 1\n mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', ' len(mark):\n print ('Sorry! Your k is too large! 
    if k > len(mark):\n        print ('Sorry! Your k is too large! please contact Zouxy') # more clusters than marker styles available\n        return 1\n\n    for i in range(m):\n        markIndex = int(clusterAssment[i, 0])\n        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex]) # plot each sample with its cluster's marker\n\n    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']\n    # draw the centroids\n    for i in range(k):\n        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)\n\n    plt.show()
        if len(listObj) > 0:\n            field_names=listObj[0].keys()\n            for entry in listObj:\n                row_object={}\n                for field in field_names:\n                    if type(entry[field]) in [OrderedDict, dict]:\n                        row_object.update(entry[field])\n                    else:\n                        row_object[field] = entry[field]\n                temp_list.append(row_object)\n            field_names=temp_list[0].keys()\n            writer=csv.DictWriter(temp_file,\n                                  fieldnames=field_names,\n                                  extrasaction='ignore')\n            writer.writeheader()\n            writer.writerows(temp_list)\n            return_value=temp_file.getvalue()\n    except Exception as e:\n        return_value=str(e)\n    finally:\n        return return_value\n\nclass UJsonRenderer(BaseRenderer):\n    \"\"\" Uses ujson instead of json to serialize an object \"\"\"\n    media_type = 'application/json'\n    format = 'json'\n\n    def render(self, obj, media_type=None, renderer_context=None):\n        return ujson.dumps(obj, ensure_ascii=True, escape_forward_slashes=False)\n", "sub_path": "main/renderers.py", "file_name": "renderers.py", "file_ext": "py", "file_size_in_byte": 1731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.renderers.BaseRenderer", "line_number": 9, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 25, "usage_type": "name"}, {"api_name": "csv.DictWriter", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.renderers.BaseRenderer", "line_number": 42, "usage_type": "name"}, {"api_name": "ujson.dumps", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "449808908", "text": "#------------------------------------------------------------------------------\n# Copyright (c) 2013, Nucleic Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#------------------------------------------------------------------------------\nimport sys\nfrom setuptools import setup, find_packages, Extension\n\n\next_modules = [\n    Extension(\n        'enaml.weakmethod',\n        ['enaml/src/weakmethod.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.callableref',\n        ['enaml/src/callableref.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.signaling',\n        ['enaml/src/signaling.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.core.funchelper',\n        ['enaml/src/funchelper.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.colorext',\n        ['enaml/src/colorext.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.fontext',\n        ['enaml/src/fontext.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.core.dynamicscope',\n        ['enaml/src/dynamicscope.cpp'],\n        language='c++',\n    ),\n    Extension(\n        'enaml.core.alias',\n        ['enaml/src/alias.cpp'],\n        language='c++',\n    )\n]\n\n\nif sys.platform == 'win32':\n    ext_modules.append(\n        Extension(\n            'enaml.winutil',\n            ['enaml/src/winutil.cpp'],\n            libraries=['user32', 'gdi32'],\n            language='c++'\n        )\n    )\n\n\nsetup(\n    name='enaml',\n    version='0.9.8',\n    author='The Nucleic Development Team',\n    author_email='sccolbert@gmail.com',\n    url='https://github.com/nucleic/enaml',\n    description='Declarative DSL for building rich user interfaces in Python',\n    long_description=open('README.rst').read(),\n    requires=['atom', 'PyQt', 'ply', 'kiwisolver'],\n    install_requires=['distribute', 'atom 
>= 0.3.8', 'kiwisolver >= 0.1.2', 'ply >= 3.4'],\n packages=find_packages(),\n package_data={\n 'enaml.applib': ['*.enaml'],\n 'enaml.stdlib': ['*.enaml'],\n 'enaml.workbench.core': ['*.enaml'],\n 'enaml.workbench.ui': ['*.enaml'],\n 'enaml.qt.docking': [\n 'dock_images/*.png',\n 'dock_images/*.py',\n 'enaml_dock_resources.qrc'\n ],\n },\n entry_points={'console_scripts': ['enaml-run = enaml.runner:main']},\n ext_modules=ext_modules,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "setuptools.Extension", "line_number": 13, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 18, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 23, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 28, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 33, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 38, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 43, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 56, "usage_type": "attribute"}, {"api_name": "setuptools.Extension", "line_number": 58, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 67, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "631166469", "text": "import os\nimport numpy as np\nimport cv2\nfrom pycocotools.coco import COCO\nfrom pycocotools.mask import *\nimport argparse\nimport json\nimport multiprocessing as mp\n\n# def get_parser():\n# parser = argparse.ArgumentParser(description='segmentation tracking verify protocol')\n# parser.add_argument('--json', type=str, help='json file path')\n# parser.add_argument('--flow_maps', type=str, help='flow_maps directory')\n# parser.add_argument('--pair_list', type=str, help='pair list directory')\n# return parser.parse_args()\n\ndef img_iou(pair):\n fn1, fn2 = pair\n img1 = cv2.imread(fn1, cv2.IMREAD_GRAYSCALE).astype(np.uint8)\n\n img2 = cv2.imread(fn2, cv2.IMREAD_GRAYSCALE).astype(np.uint8)\n # print(img1.shape)\n #print(img2.shape)\n #print(fn2)\n e_mask = encode(np.asfortranarray(img1))\n # print(type(e_mask[0]))\n e_new_mask = encode(np.asfortranarray(img2))\n map_iou = iou([e_mask], [e_new_mask], [0])\n return map_iou[0][0]\n\ndef main():\n pd_base = '/shared/xudongliu/code/semi-flow/hd3/pd_mask/bdd-KT-val-new'\n gt_base = '/shared/xudongliu/code/semi-flow/mask'\n list_file = '/shared/xudongliu/code/pytorch-liteflownet/lists/seg_track_val_new.txt'\n # args = get_parser()\n\n # load json file in coco format\n with open(list_file) as f:\n image_list = f.readlines()\n \n args = []\n for i, line in enumerate(image_list):\n gt_name = os.path.join(gt_base, line.strip(' \\n').split(' ')[0].split('.')[0] + '.png')\n pd_name = os.path.join(pd_base, line.strip(' \\n').split(' ')[0].split('.')[0] + '.png')\n args.append([gt_name, pd_name])\n\n pool = mp.Pool(16)\n iou_list = pool.map(img_iou, args)\n iou_list = np.array(iou_list)\n print(iou_list.mean())\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "iou_eval.py", "file_name": "iou_eval.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", 
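The enaml setup.py just above shows the ext_modules pattern in full. A stripped-down sketch of the same setuptools idiom, with illustrative package and source names:

```python
# Minimal sketch of the setup.py pattern above: a list of C++ Extensions plus a
# platform-conditional one. 'mypkg' and the .cpp paths are placeholders.
import sys
from setuptools import setup, Extension

ext_modules = [
    Extension('mypkg.speedups', ['src/speedups.cpp'], language='c++'),
]

if sys.platform == 'win32':
    # mirror enaml's winutil: a Windows-only module linked against user32/gdi32
    ext_modules.append(
        Extension('mypkg.winhelper', ['src/winhelper.cpp'],
                  libraries=['user32', 'gdi32'], language='c++'))

setup(name='mypkg', version='0.1.0', ext_modules=ext_modules)
```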
"api": [{"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.asfortranarray", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.asfortranarray", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "451034668", "text": "# -*- coding: utf-8 -*-\n\nfrom os import path\nfrom uuid import uuid4\n\nfrom flask import (Blueprint, abort, current_app, flash, render_template,\n request)\nfrom flask_babel import _\nfrom flask_login import current_user, fresh_login_required, login_required\nfrom pandas import read_excel\n\nfrom recordit.decorators import permission_required, role_required\nfrom recordit.extensions import db\nfrom recordit.forms.course import (AddCourseAdministratorForm,\n AddCourseTeacherForm, AddReportBatchForm,\n AddReportForm, DeleteCourseForm,\n DeleteRecordForm, DeleteReportForm,\n EditCourseForm, EditReportForm)\nfrom recordit.models import Course, RecordTable, Report, Role, User\nfrom recordit.utils.error import warn_teacher\nfrom recordit.utils.file import receive_file\nfrom recordit.utils.link import redirect_back\n\ncourse_bp = Blueprint('course', __name__)\n\n\n@course_bp.before_request\n@fresh_login_required\n@login_required\ndef login_protect():\n pass\n\n\n@course_bp.route('/manage')\n@permission_required('MODERATOR_COURSE')\ndef manage_course():\n per_page = current_app.config['MANAGE_COURSE_PER_PAGE']\n page = request.args.get('page', 1, type=int)\n pagination = Course.query.filter_by(teacher_id=current_user.id).order_by(\n Course.date.desc()).paginate(page, per_page)\n if current_user.is_admin:\n pagination = Course.query.order_by(\n Course.date.desc()).paginate(page, per_page)\n\n form = DeleteCourseForm()\n return render_template('course/manage_course.html', pagination=pagination, form=form)\n\n\n@course_bp.route('manage//switch-state')\n@permission_required('MODERATOR_COURSE')\ndef switch_course_state(course_id):\n course = Course.query.get_or_404(course_id)\n warn_teacher(course.teacher_id)\n\n course.active = not course.active\n db.session.commit()\n\n for report in Report.query.filter_by(course_id=course.id).all():\n report.active = not report.active\n db.session.commit()\n\n flash(_('Course state switched.'), 'success')\n return redirect_back()\n\n\n@course_bp.route('manage/add', methods=['GET', 'POST'])\n@permission_required('MODERATOR_COURSE')\ndef add_course():\n form = AddCourseTeacherForm()\n if current_user.is_admin:\n form = AddCourseAdministratorForm()\n\n if form.validate_on_submit():\n teacher_id = (current_user.id if current_user.is_teacher\n else User.search_user(number=form.teacher.data).id)\n\n course = Course(\n teacher_id=teacher_id,\n name=form.name.data,\n grade=form.grade.data,\n 
remark=form.remark.data\n )\n db.session.add(course)\n db.session.commit()\n\n flash(_('Add course success.'), 'success')\n return redirect_back()\n\n return render_template('course/add_course.html', form=form)\n\n\n@course_bp.route('manage//edit', methods=['GET', 'POST'])\n@permission_required('MODERATOR_REPORT')\ndef edit_course(course_id):\n course = Course.query.get_or_404(course_id)\n\n form = EditCourseForm()\n if form.validate_on_submit():\n course.name = form.name.data\n course.grade = form.grade.data\n course.remark = form.remark.data\n\n db.session.commit()\n flash(_('Edit course success.'), 'success')\n return redirect_back()\n\n form.name.data = course.name\n form.grade.data = course.grade\n form.remark.data = course.remark\n\n return render_template('course/edit_course.html', form=form)\n\n\n@course_bp.route('manage//delete', methods=['POST'])\n@permission_required('MODERATOR_REPORT')\ndef delete_course(course_id):\n course = Course.query.get_or_404(course_id)\n warn_teacher(course.teacher_id)\n\n db.session.delete(course)\n db.session.commit()\n\n flash(_('Course deleted.'), 'info')\n return redirect_back()\n\n\n@course_bp.route('manage//report')\n@permission_required('MODERATOR_REPORT')\ndef manage_report(course_id):\n course = Course.query.get_or_404(course_id)\n warn_teacher(course.teacher_id)\n\n per_page = current_app.config['MANAGE_REPORT_PER_PAGE']\n page = request.args.get('page', 1, type=int)\n pagination = Report.query.filter_by(course_id=course_id).order_by(\n Report.date.desc()).paginate(page, per_page)\n form = DeleteReportForm()\n\n return render_template('course/manage_report.html', pagination=pagination, form=form)\n\n\n@course_bp.route('manage/report//switch-state')\n@permission_required('MODERATOR_REPORT')\ndef switch_report_state(report_id):\n report = Report.query.get_or_404(report_id)\n warn_teacher(report.teacher_id)\n\n report.active = not report.active\n db.session.commit()\n\n flash(_('Report state switched.'), 'success')\n return redirect_back()\n\n\n@course_bp.route('manage/report//add', methods=['GET', 'POST'])\n@permission_required('MODERATOR_REPORT')\ndef add_report(course_id):\n form = AddReportForm()\n course = Course.query.get_or_404(course_id)\n warn_teacher(course.teacher_id)\n\n if form.validate_on_submit():\n report = Report(\n course_id=course_id,\n name=form.name.data,\n remark=form.remark.data\n )\n for number in form.speaker.data.split(','):\n speaker = User.search_user(number)\n report.speakers.append(speaker)\n\n db.session.add(report)\n db.session.commit()\n\n flash(_('Add Report success.'), 'success')\n return redirect_back()\n\n return render_template('course/add_report.html', form=form)\n\n\n@course_bp.route('manage/report//batch-add', methods=['GET', 'POST'])\n@permission_required('MODERATOR_REPORT')\ndef add_report_batch(course_id):\n form = AddReportBatchForm()\n if form.validate_on_submit():\n df = read_excel(receive_file())\n\n if not set(df.columns) >= set(['name', 'number', 'remark']):\n flash(\n _(\"The EXCEL file columns should contain 'name', 'number' and 'remark'.\"), 'error')\n return redirect_back()\n\n course = Course.query.get_or_404(course_id)\n for i, row in df.iterrows():\n number = row['number']\n if isinstance(number, (int, float)):\n number = str(int(number))\n\n user = User.search_user(number)\n if user:\n\n # TODO: multiple speakers should replace the single speaker\n report = Report(\n course_id=course_id,\n speaker_id=user.id,\n name=row['name'],\n remark=row['remark']\n )\n db.session.add(report)\n 
db.session.commit()\n else:\n flash(_(\"%(number)s does not exist.\", number=number), 'error')\n\n flash(_('Add Report success.'), 'success')\n return redirect_back()\n\n return render_template('course/add_report_batch.html', form=form)\n\n\n@course_bp.route('manage/report//edit', methods=['GET', 'POST'])\n@permission_required('MODERATOR_REPORT')\ndef edit_report(report_id):\n report = Report.query.get_or_404(report_id)\n form = EditReportForm()\n if form.validate_on_submit():\n report.name = form.name.data\n report.remark = form.remark.data\n db.session.commit()\n\n flash(_('Edit Report success.'), 'success')\n return redirect_back()\n\n form.name.data = report.name\n form.remark.data = report.remark\n\n return render_template('course/edit_report.html', form=form)\n\n\n@course_bp.route('manage/report//delete', methods=['POST'])\n@permission_required('MODERATOR_REPORT')\ndef delete_report(report_id):\n report = Report.query.get_or_404(report_id)\n warn_teacher(report.teacher_id)\n\n db.session.delete(report)\n db.session.commit()\n\n flash(_('Report deleted.'), 'info')\n return redirect_back()\n\n\n@course_bp.route('manage/record-table/')\n@permission_required('MODERATOR_RECORD_TABLE')\ndef manage_record(report_id):\n report = Report.query.get_or_404(report_id)\n warn_teacher(report.teacher_id)\n\n per_page = current_app.config['MANAGE_RECORD_TABLE_PER_PAGE']\n page = request.args.get('page', 1, type=int)\n pagination = RecordTable.query.filter_by(report_id=report_id).order_by(\n RecordTable.time.desc()).paginate(page, per_page)\n form = DeleteRecordForm()\n\n return render_template('course/manage_record.html', pagination=pagination, form=form)\n\n\n@course_bp.route('manage/record-table/', methods=['POST'])\n@permission_required('MODERATOR_RECORD_TABLE')\ndef delete_record(record_id):\n record = RecordTable.query.get_or_404(record_id)\n warn_teacher(record.teacher_id)\n\n db.session.delete(record)\n db.session.commit()\n\n flash(_('Record Table deleted.'), 'info')\n return redirect_back()\n", "sub_path": "recordit/blueprints/course.py", "file_name": "course.py", "file_ext": "py", "file_size_in_byte": 8949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_login.fresh_login_required", "line_number": 28, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "recordit.models.Course.query.filter_by", "line_number": 39, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 39, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 39, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 39, "usage_type": "name"}, {"api_name": "recordit.models.Course.date.desc", "line_number": 40, "usage_type": "call"}, {"api_name": "recordit.models.Course.date", "line_number": 40, "usage_type": "attribute"}, {"api_name": 
"recordit.models.Course", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_admin", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 41, "usage_type": "name"}, {"api_name": "recordit.models.Course.query.order_by", "line_number": 42, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 42, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 42, "usage_type": "name"}, {"api_name": "recordit.models.Course.date.desc", "line_number": 43, "usage_type": "call"}, {"api_name": "recordit.models.Course.date", "line_number": 43, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 43, "usage_type": "name"}, {"api_name": "recordit.forms.course.DeleteCourseForm", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 35, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 52, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 52, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 52, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 53, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 56, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 56, "usage_type": "name"}, {"api_name": "recordit.models.Report.query.filter_by", "line_number": 58, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 58, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 60, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 60, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 62, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 62, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 63, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 50, "usage_type": "call"}, {"api_name": "recordit.forms.course.AddCourseTeacherForm", "line_number": 69, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_admin", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 70, "usage_type": "name"}, {"api_name": "recordit.forms.course.AddCourseAdministratorForm", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_teacher", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 74, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 74, "usage_type": "attribute"}, {"api_name": "recordit.models.User.search_user", "line_number": 75, "usage_type": "call"}, {"api_name": "recordit.models.User", "line_number": 75, "usage_type": "name"}, {"api_name": "recordit.models.Course", "line_number": 77, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.add", "line_number": 83, 
"usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 83, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 83, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 84, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 84, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 86, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 86, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 89, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 67, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 95, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 95, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 95, "usage_type": "name"}, {"api_name": "recordit.forms.course.EditCourseForm", "line_number": 97, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 103, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 103, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 104, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 104, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 111, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 93, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 117, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 117, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 117, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 118, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.delete", "line_number": 120, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 120, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 120, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 121, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 121, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 123, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 123, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 124, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 115, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 130, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 130, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 130, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 131, "usage_type": "call"}, 
{"api_name": "flask.current_app.config", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "recordit.models.Report.query.filter_by", "line_number": 135, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 135, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 135, "usage_type": "name"}, {"api_name": "recordit.models.Report.date.desc", "line_number": 136, "usage_type": "call"}, {"api_name": "recordit.models.Report.date", "line_number": 136, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 136, "usage_type": "name"}, {"api_name": "recordit.forms.course.DeleteReportForm", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 139, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 128, "usage_type": "call"}, {"api_name": "recordit.models.Report.query.get_or_404", "line_number": 145, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 145, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 145, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 146, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 149, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 149, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 151, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 151, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 152, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 143, "usage_type": "call"}, {"api_name": "recordit.forms.course.AddReportForm", "line_number": 158, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 159, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 159, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 159, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 160, "usage_type": "call"}, {"api_name": "recordit.models.Report", "line_number": 163, "usage_type": "call"}, {"api_name": "recordit.models.User.search_user", "line_number": 169, "usage_type": "call"}, {"api_name": "recordit.models.User", "line_number": 169, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.add", "line_number": 172, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 172, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 172, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 173, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 173, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 173, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 175, "usage_type": "call"}, {"api_name": 
"flask_babel._", "line_number": 175, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 176, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 178, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 156, "usage_type": "call"}, {"api_name": "recordit.forms.course.AddReportBatchForm", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 186, "usage_type": "call"}, {"api_name": "recordit.utils.file.receive_file", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 189, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 190, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 191, "usage_type": "call"}, {"api_name": "recordit.models.Course.query.get_or_404", "line_number": 193, "usage_type": "call"}, {"api_name": "recordit.models.Course.query", "line_number": 193, "usage_type": "attribute"}, {"api_name": "recordit.models.Course", "line_number": 193, "usage_type": "name"}, {"api_name": "recordit.models.User.search_user", "line_number": 199, "usage_type": "call"}, {"api_name": "recordit.models.User", "line_number": 199, "usage_type": "name"}, {"api_name": "recordit.models.Report", "line_number": 203, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.add", "line_number": 209, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 209, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 209, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 210, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 210, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 210, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 212, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 212, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 214, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 214, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 215, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 217, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 182, "usage_type": "call"}, {"api_name": "recordit.models.Report.query.get_or_404", "line_number": 223, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 223, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 223, "usage_type": "name"}, {"api_name": "recordit.forms.course.EditReportForm", "line_number": 224, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 228, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 228, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 228, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 230, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 230, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 231, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 236, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 221, "usage_type": "call"}, {"api_name": 
"recordit.models.Report.query.get_or_404", "line_number": 242, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 242, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 242, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 243, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.delete", "line_number": 245, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 245, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 245, "usage_type": "name"}, {"api_name": "recordit.extensions.db.session.commit", "line_number": 246, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 246, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 248, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 248, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 249, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 240, "usage_type": "call"}, {"api_name": "recordit.models.Report.query.get_or_404", "line_number": 255, "usage_type": "call"}, {"api_name": "recordit.models.Report.query", "line_number": 255, "usage_type": "attribute"}, {"api_name": "recordit.models.Report", "line_number": 255, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 258, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 258, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 259, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 259, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 259, "usage_type": "name"}, {"api_name": "recordit.models.RecordTable.query.filter_by", "line_number": 260, "usage_type": "call"}, {"api_name": "recordit.models.RecordTable.query", "line_number": 260, "usage_type": "attribute"}, {"api_name": "recordit.models.RecordTable", "line_number": 260, "usage_type": "name"}, {"api_name": "recordit.models.RecordTable.time.desc", "line_number": 261, "usage_type": "call"}, {"api_name": "recordit.models.RecordTable.time", "line_number": 261, "usage_type": "attribute"}, {"api_name": "recordit.models.RecordTable", "line_number": 261, "usage_type": "name"}, {"api_name": "recordit.forms.course.DeleteRecordForm", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 264, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 253, "usage_type": "call"}, {"api_name": "recordit.models.RecordTable.query.get_or_404", "line_number": 270, "usage_type": "call"}, {"api_name": "recordit.models.RecordTable.query", "line_number": 270, "usage_type": "attribute"}, {"api_name": "recordit.models.RecordTable", "line_number": 270, "usage_type": "name"}, {"api_name": "recordit.utils.error.warn_teacher", "line_number": 271, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session.delete", "line_number": 273, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 273, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 273, "usage_type": "name"}, {"api_name": 
"recordit.extensions.db.session.commit", "line_number": 274, "usage_type": "call"}, {"api_name": "recordit.extensions.db.session", "line_number": 274, "usage_type": "attribute"}, {"api_name": "recordit.extensions.db", "line_number": 274, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 276, "usage_type": "call"}, {"api_name": "flask_babel._", "line_number": 276, "usage_type": "call"}, {"api_name": "recordit.utils.link.redirect_back", "line_number": 277, "usage_type": "call"}, {"api_name": "recordit.decorators.permission_required", "line_number": 268, "usage_type": "call"}]} +{"seq_id": "634435770", "text": "import requests\nfrom PIL import Image\nfrom io import BytesIO\nimport re\n\nreq = requests.get('http://www.pythonchallenge.com/pc/def/oxygen.png')\nimg = Image.open(BytesIO(req.content))\n\nwidth, height = img.size\nprint('W: {}, H: {}'.format(width, height))\n\nstart_x, start_y = (0, 45)\nend_x, end_y = (605, 50)\nbox_cords = (start_x, start_y, end_x, end_y)\n\nbox = img.crop(box_cords)\npx_map = box.load()\n\nchar_width = 7\nchars = []\nfor j in range(0, end_x, char_width):\n chars.append(chr(px_map[j,0][0]))\n\nmsg = ''.join(chars)\nprint(msg)\n\nnext_list = re.search(r'\\[(.+)\\]', msg).group(1).split(', ')\n\nnext_level = [chr(i) for i in list(map(int, next_list))]\nprint('http://pythonchallenge.com/pc/def/{}.html'.format(''.join(next_level)))\n", "sub_path": "0x08.py", "file_name": "0x08.py", "file_ext": "py", "file_size_in_byte": 732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 7, "usage_type": "call"}, {"api_name": "re.search", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "288226162", "text": "import cv2 as cv\nimport numpy as np\n\n\ndef detect_hough_circle_demo(image):\n # 霍夫圆检测對燥聲敏感,邊缘檢测消噪\n dst = cv.pyrMeanShiftFiltering(image, 10, 100) # 邊缘保留滤波EPF (這步去掉看看結果)\n gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY) # 變成灰度圖像\n circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, 20, param1=40, param2=30, minRadius=0, maxRadius=0)\n circles = np.uint16(np.around(circles)) #把circles包含的圆心和半徑的值變成整數\n for i in circles[0, :]:\n cv.circle(image, (i[0], i[1]), i[2], (0, 0, 255), 2) #劃出圓\n cv.circle(image, (i[0], i[1]), 2, (255, 0, 0), 2) #劃出圓心\n cv.imshow(\"circle image\", image)\n\n\nprint(\"----------- Hello Python ------------\")\nsrc = cv.imread(\"E:\\PYAI\\image/CircleDetection002.png\") # 讀取圖檔\ncv.imshow(\"Input Image\",src) # 顯示圖片\ncv.namedWindow(\"Input Image\",cv.WINDOW_AUTOSIZE) # 自動調整視窗大小\ndetect_hough_circle_demo(src)\ncv.waitKey(0) # 等待使用者按按鍵\n\ncv.destroyWindow(\"Input Image\") # 關閉視窗\n\n", "sub_path": "openCV/Sample022.py", "file_name": "Sample022.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cv2.pyrMeanShiftFiltering", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.HoughCircles", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.HOUGH_GRADIENT", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 10, 
"usage_type": "call"}, {"api_name": "numpy.around", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.WINDOW_AUTOSIZE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.destroyWindow", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "362011622", "text": "import urllib2\nfrom django.utils import simplejson as json\nimport re\n\nclass UnknownUser(Exception):\n pass\n\ndef getAnimeList(username):\n apiUrl = \"http://mal-api.com/animelist/\" + username\n f = urllib2.urlopen(apiUrl)\n j = json.loads(f.read())\n return j[\"anime\"]\n \ndef getUserId(username):\n profileUrl = \"http://myanimelist.net/profile/\" + username\n f = urllib2.urlopen(profileUrl)\n \n profileHtml = f.read()\n \n userIdRegEx = r'Send Friend Request'\n \n m = re.search(userIdRegEx, profileHtml)\n \n if m is None:\n raise UnknownUser(\"No RegEx match in getUserId for %s\" % username)\n \n return int(m.group(1))\n \ndef getRandomUsername():\n # Use the url for getting a random list\n url = \"http://myanimelist.net/users.php?lucky=1\"\n f = urllib2.urlopen(url)\n\n # Extract the username from the title\n usernameRegex = r\"([\\S]+)'s Anime List - MyAnimeList.net\"\n match = re.search(usernameRegex, f.read())\n\n if match is None:\n raise Exception(\"No regular expression match\")\n \n return match.group(1)\n \ndef getRecentOnlineUsernames():\n url = \"http://myanimelist.net/users.php\"\n f = urllib2.urlopen(url)\n \n # Extract all matches of a profile url\n usernameRegex = r'[\\S]+'\n \n match = re.findall(usernameRegex, f.read())\n \n if match is None:\n return []\n \n return match\n \n \n \n", "sub_path": "webapp/WebGrab.py", "file_name": "WebGrab.py", "file_ext": "py", "file_size_in_byte": 1571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib2.urlopen", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.simplejson.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 11, "usage_type": "name"}, {"api_name": "urllib2.urlopen", "line_number": 16, "usage_type": "call"}, {"api_name": "re.search", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 32, "usage_type": "call"}, {"api_name": "re.search", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 45, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "435543623", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nmanorm.plot\n~~~~~~~~~~~\n\nThis module contains plot functions.\n\"\"\"\n\nimport os\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plt_figures(root_dir, peaks1, peaks2, peaks_merged, ma_params):\n peaks1_unique = []\n for chrom in peaks1.chroms:\n for peak in peaks1.fetch(chrom):\n if peak.type == 'unique':\n peaks1_unique.append(peak)\n\n peaks2_unique = []\n for chrom in peaks2.chroms:\n for peak in peaks2.fetch(chrom):\n 
if peak.type == 'unique':\n peaks2_unique.append(peak)\n\n merged_common_peaks = []\n for chrom in peaks_merged.chroms:\n for peak in peaks_merged.fetch(chrom):\n merged_common_peaks.append(peak)\n\n peaks1_name = peaks1.name + '_unique'\n peaks2_name = peaks2.name + '_unique'\n merged_peaks_name = 'merged_common_peaks'\n\n peaks_names = [peaks1_name, peaks2_name, merged_peaks_name]\n colors = [\"#E53A40\", \"#30A9DE\", \"#566270\"]\n\n output_prefix = peaks1.name + '_vs_' + peaks2.name\n fig, ax = plt.subplots(figsize=(8, 6))\n reads_density1, reads_density2 = [], []\n for peak in merged_common_peaks:\n reads_density1.append(peak.read_density1)\n reads_density2.append(peak.read_density2)\n log_read_density_max = max(np.log2(reads_density1))\n log_read_density_min = min(np.log2(reads_density1))\n ax.scatter(np.log2(reads_density1), np.log2(reads_density2), s=6, c=\"#566270\", label=merged_peaks_name, alpha=0.5)\n rx = np.arange(log_read_density_min, log_read_density_max, 0.01)\n ry = (2 - ma_params[1]) * rx / (2 + ma_params[1]) - 2 * ma_params[0] / (2 + ma_params[1])\n ax.plot(rx, ry, \"-\", color=\"#1E2022\")\n ax.legend(loc=1, fontsize=8)\n ax.set_xlabel(\"log2 read density in {}\".format(peaks1.name))\n ax.set_ylabel(\"log2 read density in {}\".format(peaks2.name))\n ax.set_title(\"M-A model fitted via common peaks\", fontsize=18)\n plt.savefig(os.path.join(root_dir, 'output_figures', output_prefix + '_read_density_on_common_peaks.png'),\n dpi=300)\n\n fig, ax = plt.subplots(figsize=(8, 6))\n ax.grid(axis=\"y\", linestyle=\"--\")\n a_max = 0\n a_min = 999999999\n for idx, peaks in enumerate([peaks1_unique, peaks2_unique, merged_common_peaks]):\n m_values = [peak.m_value for peak in peaks]\n a_values = [peak.a_value for peak in peaks]\n a_max = max(max(a_values), a_max)\n a_min = min(min(a_values), a_min)\n plt.scatter(a_values, m_values, s=6, c=colors[idx], label=peaks_names[idx], alpha=0.5)\n ax.legend(loc=1, fontsize=8)\n\n x = np.arange(a_min, a_max, 0.01)\n y = ma_params[1] * x + ma_params[0]\n ax.plot(x, y, \"-\", color=\"#1E2022\")\n ax.set_xlabel(\"A value\", fontsize=16)\n ax.set_ylabel(\"M value\", fontsize=16)\n ax.set_title(\"M-A plot before normalization\", fontsize=18)\n fig.savefig(os.path.join(root_dir, 'output_figures', output_prefix + '_MA_plot_before_normalization.png'),\n dpi=300)\n\n fig, ax = plt.subplots(figsize=(8, 6))\n ax.grid(axis=\"y\", linestyle=\"--\")\n for idx, peaks in enumerate([peaks1_unique, peaks2_unique, merged_common_peaks]):\n m_values = [peak.m_value_normed for peak in peaks]\n a_values = [peak.a_value_normed for peak in peaks]\n plt.scatter(a_values, m_values, s=6, c=colors[idx], label=peaks_names[idx], alpha=0.5)\n ax.legend(loc=1, fontsize=8)\n ax.set_xlabel(\"A value\", fontsize=16)\n ax.set_ylabel(\"M value\", fontsize=16)\n ax.set_title(\"M-A plot after normalization\", fontsize=18)\n fig.savefig(os.path.join(root_dir, 'output_figures', output_prefix + '_MA_plot_after_normalization.png'),\n dpi=300)\n\n fig, ax = plt.subplots(figsize=(8, 6))\n ax.grid(axis=\"y\", linestyle=\"--\")\n m_values = []\n a_values = []\n p_values = []\n for idx, peaks in enumerate([peaks1_unique, peaks2_unique, merged_common_peaks]):\n for peak in peaks:\n m_values.append(peak.m_value_normed)\n a_values.append(peak.a_value_normed)\n p_values.append(peak.p_value)\n colors = -np.log10(p_values)\n for i, c in enumerate(colors):\n if c > 50:\n colors[i] = 50\n scatter = ax.scatter(a_values, m_values, s=10, c=colors, cmap=\"coolwarm\")\n fig.colorbar(scatter, ax=ax)\n 
ax.set_xlabel(\"A value\", fontsize=16)\n ax.set_ylabel(\"M value\", fontsize=16)\n ax.set_title(\"-log10(P-value)\", fontsize=18)\n fig.savefig(os.path.join(root_dir, 'output_figures', output_prefix + '_MA_plot_with_P_value.png'), dpi=300)\n", "sub_path": "manorm/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 4584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.log2", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "106160513", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n\nclass System:\n def __init__(self, systemkorname=None, systemname=None, systemid=None):\n # 분류군국명\n self.systemkorname = systemkorname\n # 분류군명\n self.systemname = systemname\n # 분류군ID\n self.systemid = systemid\n\nclass Service:\n def __init__(self):\n self.base_url = 'http://apis.data.go.kr/1400119/KffniService1'\n self.api_key = ''\n\n # 분류군정보 목록 검색\n def searchRequest(self, st, sw, numOfRows, pageNo):\n url = self.base_url + '/systemSearch?ServiceKey=' + self.api_key + '&st='+ st + '&sw=' + sw + '&numOfRows=' + numOfRows + '&pageNo=' + pageNo\n html = requests.get(url).text\n root = BeautifulSoup(html, 'lxml-xml')\n code = root.find('resultCode').text\n resultMsg = root.find('resultMsg').text\n results = []\n\n if code == '00':\n items = root.select('item')\n for item in items:\n # 분류군국명\n systemkorname = 
item.find('systemkorname').text\n # taxon name\n systemname = item.find('systemname').text\n # taxon ID\n systemid = item.find('systemid').text\n\n results.append([systemkorname, systemname, systemid])\n\n return results\n\n else:\n print('Error code: ', code)\n print('Error message: ', resultMsg)\n\n # look up taxon information details\n def infoRequest(self, q1):\n url = self.base_url + '/systemInfo?ServiceKey=' + self.api_key + '&q1='+ q1\n html = requests.get(url).text\n root = BeautifulSoup(html, 'lxml-xml')\n code = root.find('resultCode').text\n resultMsg = root.find('resultMsg').text\n results = []\n\n if code == '00':\n items = root.select('item')\n for item in items:\n # Korean taxon name\n systemkorname = item.find('systemkorname').text\n # taxon name\n systemname = item.find('systemname').text\n # taxon ID\n systemid = item.find('systemid').text\n # parent taxon ID\n uppersystemid = item.find('uppersystemid').text\n # copyright notice\n cprtCtnt = item.find('cprtCtnt').text\n\n results.append([systemkorname, systemname, systemid, uppersystemid, cprtCtnt])\n\n return results\n\n else:\n print('Error code: ', code)\n print('Error message: ', resultMsg)\n", "sub_path": "FinalProject_Project Mushroom/models/system_model.py", "file_name": "system_model.py", "file_ext": "py", "file_size_in_byte": 2631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "411820087", "text": "#! /usr/bin/env python3\n\nimport vcf #sudo pip3 install PyVCF or via project in pycharm\n\n__author__ = 'Sabina Turkusic'\n\n\nclass Assignment2:\n \n def __init__(self, chr21_file, chr22_file):\n ## Check if pyvcf is installed\n print(\"PyVCF version: %s\" % vcf.VERSION)\n self.chr21_file = chr21_file\n self.chr22_file = chr22_file\n self.vcf_reader1 = vcf.Reader(open(self.chr21_file, \"r\"))\n self.vcf_reader2 = vcf.Reader(open(self.chr22_file, \"r\"))\n\n\n def get_average_quality_of_file(self):\n #record = next(self.vcf_reader2)\n #print(record.QUAL)\n quality_list = []\n with open(self.chr22_file) as my_vcf_fh:\n
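The Service class above parses the API's XML with BeautifulSoup's lxml-xml mode. A sketch of the same parsing on an inline document, so no API key or network call is needed; the XML layout mirrors the fields the model reads, and the lxml package must be installed:

```python
# Sketch of the lxml-xml parsing used by Service, against an inline response.
# The element values are placeholders shaped like the real API's fields.
from bs4 import BeautifulSoup

xml = """<response><header><resultCode>00</resultCode>
<resultMsg>OK</resultMsg></header>
<body><items><item><systemkorname>버섯</systemkorname>
<systemname>Fungi</systemname><systemid>1</systemid></item></items></body></response>"""

root = BeautifulSoup(xml, 'lxml-xml')     # requires the lxml parser
print(root.find('resultCode').text)       # '00' means success, as in the model
for item in root.select('item'):
    print(item.find('systemkorname').text, item.find('systemname').text)
```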
vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n if record.is_indel:\n indel_counter += 1\n print(\"The number of INDELs is:\", indel_counter)\n\n\n def get_number_of_snvs(self):\n snv_counter = 0\n with open(self.chr22_file) as my_vcf_fh:\n vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n if record.is_snp:\n snv_counter += 1\n print(\"The number of SNVs is:\", snv_counter)\n\n \n def get_number_of_heterozygous_variants(self):\n heterozygote_counter = 0\n with open(self.chr22_file) as my_vcf_fh:\n vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n if record.num_het:\n heterozygote_counter += 1\n print(\"The number of heterozygote variants is:\", heterozygote_counter)\n\n \n def merge_chrs_into_one_vcf(self):\n vcf_read = vcf.Reader(open(self.chr21_file))\n vcf_write = vcf.Writer(open(\"merge.vcf\", \"w+\"), vcf_read)\n for record in vcf_read:\n vcf_write.write_record(record)\n\n vcf_read_1 = vcf.Reader(open(self.chr22_file))\n vcf_write_1 = vcf.Writer(open(\"merge.vcf\", \"a\"), vcf_read_1)\n for record in vcf_read_1:\n vcf_write_1.write_record(record)\n\n line_counter = 0\n with open(\"merge.vcf\") as merge:\n for line in merge:\n line_counter += 1\n\n print(\"A new file has been created and the total number of lines in new vcf file is:\", line_counter)\n\n \n def print_summary(self):\n print(\"Print all results here\")\n self.get_average_quality_of_file()\n self.get_total_number_of_variants_of_file()\n self.get_variant_caller_of_vcf()\n self.get_human_reference_version()\n self.get_number_of_indels()\n self.get_number_of_snvs()\n self.get_number_of_heterozygous_variants()\n self.merge_chrs_into_one_vcf()\n\n \ndef main():\n print(\"Assignment 2\")\n assignment2 = Assignment2(\"chr21_new.vcf\", \"chr22_new.vcf\")\n assignment2.print_summary()\n print(\"Done with assignment 2\")\n \n \nif __name__ == '__main__':\n main()\n \n \n\n\n\n", "sub_path": "assignment2_sabinaturkusic.py", "file_name": "assignment2_sabinaturkusic.py", "file_ext": "py", "file_size_in_byte": 4483, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "vcf.VERSION", "line_number": 12, "usage_type": "attribute"}, {"api_name": "vcf.Reader", "line_number": 15, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 16, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 24, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 35, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 44, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 55, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 66, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 76, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 86, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 94, "usage_type": "call"}, {"api_name": "vcf.Writer", "line_number": 95, "usage_type": "call"}, {"api_name": "vcf.Reader", "line_number": 99, "usage_type": "call"}, {"api_name": "vcf.Writer", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "153999866", "text": "\"\"\"\nData\n\"\"\"\nimport os\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\ndef get_dataset(img_width, img_height, class_mode):\n\n train_data_dir = os.path.expanduser('~/data/train')\n validation_data_dir = os.path.expanduser('~/data/validation')\n batch_size = 256\n\n train_datagen = ImageDataGenerator(rescale=1. 
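Assignment2 above makes one pass over the VCF per statistic. A condensed sketch that gathers the same counts in a single pass; 'example.vcf' is a placeholder path, and QUAL is assumed to be numeric, as the record's own code assumes:

```python
# One-pass sketch of the PyVCF loops above: quality sum, record count,
# and the SNP/indel split, all from a single vcf.Reader iteration.
import vcf

total = snps = indels = 0
qual_sum = 0.0
with open('example.vcf') as fh:
    for record in vcf.Reader(fh):
        total += 1
        qual_sum += record.QUAL   # PHRED-scaled site quality (assumed present)
        snps += record.is_snp     # booleans count as 0/1
        indels += record.is_indel

if total:
    print('records:', total, 'snps:', snps, 'indels:', indels,
          'avg qual:', qual_sum / total)
```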
/ 255)\n test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode='grayscale')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=class_mode,\n color_mode='grayscale')\n\n return {'train_generator': train_generator,\n 'validation_generator': validation_generator,\n 'input_shape': (img_width, img_height, 1)}\n", "sub_path": "data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 1006, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.expanduser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "570826605", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://admin-demo.nopcommerce.com/login\")\n\n# Get attribute of current active element\nattr = driver.switch_to.active_element.get_attribute(\"value\")\nprint(attr)\ndriver.close()", "sub_path": "TestScripts/WebElements_Example/Get_Active_Element.py", "file_name": "Get_Active_Element.py", "file_ext": "py", "file_size_in_byte": 290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 4, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "261297649", "text": "# -*- encoding: utf-8 -*-\n\"\"\"Defines all Settings models.\n\nMany of these models are administered via the Django Admin UI.\n\n\"\"\"\nfrom django.db import models\n\nclass Time(models.Model):\n\t\"\"\"Time table class.\"\"\"\n\tname = models.CharField(max_length=45, db_column='name', null=False, blank=False)\n\tnote = models.TextField(db_column='note', null=True, blank=True)\n\t# date_start = models.DateTimeField(db_column='date_start', auto_now=True, null=False, blank=True)\n\t# date_end = models.DateTimeField(db_column='date_end', auto_now=True, null=False, blank=True)\n\n\tclass Meta:\n\t\tdb_table = 'settings_time'\n\t\tordering = ['id']\n\t\tverbose_name = 'time'\n\t\tverbose_name_plural = 'times'\n\n\tdef __str__(self):\n\t\treturn '%s' % self.name\n\nclass Setting(models.Model):\n\t\"\"\"Setting table class.\"\"\"\n\tname = models.CharField(max_length=45, db_column='name', null=False, blank=False)\n\tvalue = models.CharField(max_length=45, db_column='value', null=False, blank=False)\n\n\tclass Meta:\n\t\tdb_table = 'settings_setting'\n\t\tordering = ['id']\n\t\tverbose_name = 'setting'\n\t\tverbose_name_plural = 'settings'\n\n\tdef __str__(self):\n\t\treturn '%s' % self.name", "sub_path": "notes/apps/settings/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1116, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": 
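get_dataset above returns the two generators plus an input shape. A sketch of feeding them to a tiny model, assuming the record is saved as data.py (its declared file name) and that ~/data/{train,validation} hold one subfolder per class, which is what flow_from_directory expects; fit_generator matches the Keras API of that era:

```python
# Sketch of consuming the generators returned by get_dataset. The model, step
# counts, and square 64x64 size are illustrative choices.
from keras.models import Sequential
from keras.layers import Flatten, Dense

from data import get_dataset  # the record above, saved under its declared name

ds = get_dataset(img_width=64, img_height=64, class_mode='binary')

model = Sequential([
    Flatten(input_shape=ds['input_shape']),  # (64, 64, 1) grayscale input
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

model.fit_generator(ds['train_generator'], steps_per_epoch=10, epochs=1,
                    validation_data=ds['validation_generator'], validation_steps=2)
```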
"django.db.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "593441600", "text": "import requests # requests是python实现的最简单易用的HTTP库\nimport re #regular expression,正则表达式,是用来简洁表达一组字符串特征的表达式。最主要应用在字符串匹配中。\nimport os #os库提供通用的,基本的操作系统交互功能(windows,mac os,linux)\n\nword = \"店铺牌\"\nif not os.path.exists(word):\n #如果不存在就自己生成一个\n os.mkdir(word)\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}\n\n\nurls='https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height\\=&face=0&istype=2&ie=utf-8&fm=index&pos=history&word=%E5%BA%97%E9%93%BA%E7%89%8C'\n\n\ns = requests.Session()\nresponse=s.get(url=urls,headers=headers,allow_redirects=False)\nprint(response)\ncontent=response.content.decode('utf-8')\nimg_urls=re.findall('\"thumbURL\":\"(.*?)\"',content,re.S)\nprint(img_urls)\n# 设置成全局变量\nglobal i\ni=1\nfor img_url in img_urls:\n # 对每一张图片进行处理\n s = requests.Session()\n response = s.get(url=img_url,headers=headers)\n content = response.content\n # 以wb(二进制)的形式保存图片,并进行命名\n with open(word + '/' + '{}.jpg'.format(i), 'wb') as f:\n f.write(content)\n print(\"正在爬取第%d张图片\" % i)\n # 图片按照先后顺序进行从1到n的命名,所以i++\n i += 1\n\n\n\n", "sub_path": "pachong.py", "file_name": "pachong.py", "file_ext": "py", "file_size_in_byte": 1509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.exists", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 16, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "re.S", "line_number": 20, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "41346221", "text": "import requests\nimport telebot\nimport csv\nfrom telebot import types\nimport datetime\nfrom datetime import date\nimport json\n\nJOB_STARTDATE = date(2019, 1, 1)\nWEEKLY_WORKING_HOURS = 8\nHOLIDAYS = 9\n\nmonthly_working_hours = WEEKLY_WORKING_HOURS * (31 / 7)\ndaily_working_hours = WEEKLY_WORKING_HOURS / 7\n\nLOG_IN_KEY_MESSAGE = 'Master talks to you'\nFILE_WITH_ID_PERMISSIONS = 'IDs.txt'\nFILE_WITH_WORK_HOURS = 'work_hours.txt'\n\n# Telebot API settings\nTOKEN = '910455209:AAHHYVB7hHY353ERjWQUfkjtEVMNF5T9_4g'\nbot = telebot.TeleBot(TOKEN)\n\n\n# check if ID in FILE_WITH_ID_PERMISSIONS\ndef 
# image counter; images are named 1..n in download order\ni=1\nfor img_url in img_urls:\n # process each image in turn\n s = requests.Session()\n response = s.get(url=img_url,headers=headers)\n content = response.content\n # save the image in binary ('wb') mode under a numbered name\n with open(word + '/' + '{}.jpg'.format(i), 'wb') as f:\n f.write(content)\n print(\"Downloading image %d\" % i)\n # increment the counter for the next image\n i += 1\n\n\n\n", "sub_path": "pachong.py", "file_name": "pachong.py", "file_ext": "py", "file_size_in_byte": 1509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.exists", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 16, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "re.S", "line_number": 20, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "41346221", "text": "import requests\nimport telebot\nimport csv\nfrom telebot import types\nimport datetime\nfrom datetime import date\nimport json\n\nJOB_STARTDATE = date(2019, 1, 1)\nWEEKLY_WORKING_HOURS = 8\nHOLIDAYS = 9\n\nmonthly_working_hours = WEEKLY_WORKING_HOURS * (31 / 7)\ndaily_working_hours = WEEKLY_WORKING_HOURS / 7\n\nLOG_IN_KEY_MESSAGE = 'Master talks to you'\nFILE_WITH_ID_PERMISSIONS = 'IDs.txt'\nFILE_WITH_WORK_HOURS = 'work_hours.txt'\n\n# Telebot API settings\nTOKEN = '910455209:AAHHYVB7hHY353ERjWQUfkjtEVMNF5T9_4g'\nbot = telebot.TeleBot(TOKEN)\n\n\n# check if ID is in FILE_WITH_ID_PERMISSIONS\ndef check_permission(user_id):\n with open(FILE_WITH_ID_PERMISSIONS) as csv_file:\n id_db = csv.reader(csv_file)\n # search for id\n for row in id_db:\n if str(user_id) == row[0]:\n return True\n return False\n\n\n# append id to FILE_WITH_ID_PERMISSIONS\ndef create_permission(user_id):\n with open(FILE_WITH_ID_PERMISSIONS, mode='a') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([str(user_id)])\n\n\ndef read_work_hours(day=False, month=False, year=date.today().year):\n with open(FILE_WITH_WORK_HOURS) as csv_file:\n work_hours_db = csv.reader(csv_file)\n # search for id\n hours = 0\n if day:\n for row in work_hours_db:\n if row[3] == str(year):\n if row[2] == str(month):\n if row[1] == str(day):\n hours += int(row[0])\n elif month:\n for row in work_hours_db:\n if row[3] == str(year):\n if row[2] == str(month):\n hours += int(row[0])\n else:\n for row in work_hours_db:\n if row[3] == str(year):\n hours += int(row[0])\n return hours\n\n\ndef read_total_worked_hours():\n with open(FILE_WITH_WORK_HOURS) as csv_file:\n work_hours_db = csv.reader(csv_file)\n hours = 0\n for row in work_hours_db:\n hours += int(row[0])\n return hours\n\n\ndef calculate_total_planned_hours():\n today = date.today()\n time_in_office = today - JOB_STARTDATE\n holidays = (time_in_office.days//365 + 1) * HOLIDAYS\n return (time_in_office.days-holidays) * daily_working_hours\n
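# Worked example of the planning arithmetic above (illustrative numbers): a query on\n# 2019-12-31 gives time_in_office.days == 364, holidays == (364 // 365 + 1) * 9 == 9,\n# and planned hours == (364 - 9) * (8 / 7) == 355 * 8 / 7, roughly 405.7.\n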
\ndef write_work_hours(hours, day=date.today().day, month=date.today().month, year=date.today().year):\n with open(FILE_WITH_WORK_HOURS, mode='a') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([str(hours), str(day), str(month), str(year)])\n workday = datetime.date(year, month, day)\n return workday\n\n\n@bot.message_handler(commands=['start'])\ndef send_start_message(message):\n if check_permission(message.chat.id):\n bot.send_message(message.chat.id, 'You can add your working hours now')\n else:\n bot.send_message(message.chat.id, 'Please log in')\n\n\n@bot.message_handler(commands=['cancel'])\ndef send_confirm_cancel(message):\n if check_permission(message.chat.id):\n try:\n with open(FILE_WITH_WORK_HOURS, \"r\") as f:\n lines = f.readlines()\n popped = lines.pop()\n with open(FILE_WITH_WORK_HOURS, \"w\") as f:\n f.writelines(lines)\n bot.send_message(message.chat.id, \"Last record \" + popped.strip() + \" was cancelled\")\n except IndexError:\n bot.send_message(message.chat.id, \"No more records!\")\n else:\n bot.send_message(message.chat.id, 'Please log in')\n\n\n@bot.message_handler(commands=['balance'])\ndef send_balance(message):\n if check_permission(message.chat.id):\n worked_hours = read_total_worked_hours()\n balance = worked_hours - calculate_total_planned_hours()\n bot.send_message(message.chat.id, \"Total worked hours: \" + str(int(worked_hours)) + \"\\nTotal balance: \" + str(int(balance)))\n else:\n bot.send_message(message.chat.id, 'Please log in')\n\n\n@bot.message_handler(func=lambda message: message.text == LOG_IN_KEY_MESSAGE)\ndef successful_log_in(message):\n if not check_permission(message.chat.id):\n create_permission(message.chat.id)\n bot.send_message(message.chat.id, 'You are now logged in')\n else:\n bot.send_message(message.chat.id, 'You are already logged in')\n\n\n@bot.message_handler(func=lambda message: not check_permission(message.chat.id))\ndef err_no_permission(message):\n bot.send_message(message.chat.id, 'Please log in')\n\n\n@bot.message_handler(func=lambda message: (' ' in message.text) or not ('.' in message.text))\ndef add_working_hours(message):\n try:\n work_date = datetime.datetime.strptime(message.text, \"%H\")\n workday = write_work_hours(work_date.hour)\n except ValueError:\n try:\n work_date = datetime.datetime.strptime(message.text, \"%H %d\")\n workday = write_work_hours(work_date.hour, work_date.day)\n except ValueError:\n try:\n work_date = datetime.datetime.strptime(message.text, \"%H %d.%m\")\n workday = write_work_hours(work_date.hour, work_date.day, work_date.month)\n except ValueError:\n try:\n work_date = datetime.datetime.strptime(message.text, \"%H %d.%m.%y\")\n workday = write_work_hours(work_date.hour, work_date.day, work_date.month, work_date.year)\n except ValueError:\n bot.send_message(message.chat.id, \"Format was not recognised. Please write in one of these \"\n \"formats to add working hours: %H; %H %d; %H %d.%m; %H %d.%m.%y\"\n \" or in one of these to read working hours: %m.; %d.%m; %m.%y; %d.%m.%y\")\n return\n bot.send_message(message.chat.id, str(work_date.hour) + \" working hours on \" + workday.strftime(\n '%d.%m.%y') + \" were added. Good job, man!\")\n return\n\n
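# The nested try/except chain above can also be written as a loop over candidate\n# formats. A sketch (hypothetical helper, not wired into the handlers):\ndef parse_work_message(text, formats=(\"%H\", \"%H %d\", \"%H %d.%m\", \"%H %d.%m.%y\")):\n for fmt in formats:\n try:\n return datetime.datetime.strptime(text, fmt), fmt\n except ValueError:\n continue\n return None, None\n\n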
Month \"\n \"balance = \"\n + str(hours - monthly_working_hours))\n except ValueError:\n try:\n work_date = datetime.datetime.strptime(message.text, \"%d.%m\")\n work_date = work_date.replace(year=date.today().year)\n hours = read_work_hours(day=work_date.day, month=work_date.month)\n bot.send_message(message.chat.id,\n str(hours) + \" hours worked on \" + work_date.strftime('%d.%m.%y'))\n except ValueError:\n try:\n work_date = datetime.datetime.strptime(message.text, \"%d.%m.%y\")\n hours = read_work_hours(day=work_date.day, month=work_date.month, year=work_date.year)\n bot.send_message(message.chat.id,\n str(hours) + \" hours worked on \" + work_date.strftime('%d.%m.%y'))\n except ValueError:\n bot.send_message(message.chat.id, \"Format was not recognised.Please write in one of these \"\n \"formats to add working hours: %H; %H %d; %H %d.%m; %H %d.%m.%y\"\n \" or in one of these to read working hours: %m.; %d.%m; %m.%y; &d,%m,%y\")\n return\n\n\npolling = True\nwhile polling:\n try:\n bot.polling()\n except (requests.exceptions.ReadTimeout, requests.packages.urllib3.exceptions.ReadTimeoutError,\n TimeoutError, requests.exceptions.ConnectionError):\n print(\"timeout\")\n polling = True\n", "sub_path": "WorkTimeBot/workTimeBot.py", "file_name": "workTimeBot.py", "file_ext": "py", "file_size_in_byte": 8539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.date", "line_number": 9, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 22, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 28, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 43, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 82, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 141, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 141, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 153, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 168, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 169, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 176, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 176, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 185, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 185, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 191, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 191, "usage_type": "attribute"}, {"api_name": "requests.exceptions", "line_number": 206, "usage_type": "attribute"}, {"api_name": "requests.packages", "line_number": 206, "usage_type": "attribute"}, {"api_name": "requests.exceptions", "line_number": 207, "usage_type": "attribute"}]} +{"seq_id": "261072450", "text": "from django.shortcuts import render_to_response\n\nfrom main.ipara_lib.configs import Configs\nfrom main.ipara_lib.Helper import Helper\nfrom main.ipara_lib.BankCardDeleteRequest import BankCardDeleteRequest\nfrom main.ipara_lib.BankCardInquiryRequest import BankCardInquiryRequest\nfrom main.ipara_lib.BinNumberRequest import BinNumberRequest\nfrom main.ipara_lib.BankCardCreateRequest import BankCardCreateRequest\nfrom main.ipara_lib.PaymentInquiryRequest import PaymentInquiryRequest\nfrom main.ipara_lib.ThreedInitResponse import ThreedInitResponse\nfrom main.ipara_lib.ThreedPaymentCompleteRequest import ThreedPaymentCompleteRequest\nfrom main.ipara_lib.ThreedPaymentRequest import ThreedPaymentRequest\nfrom main.ipara_lib.ApiPaymentRequest import ApiPaymentRequest\n\nfrom random import randint\nimport json\n\nconfig = Configs(\n #\"Public Magaza Anahtarı\n # size mağaza başvurunuz sonucunda gönderilen public key (açık anahtar) bilgisini kullanınız.\",\n '',\n #\"Private Magaza Anahtarı\n # size mağaza başvurunuz sonucunda gönderilen privaye key (gizli anahtar) bilgisini kullanınız.\",\n '',\n #iPara web servisleri API url'lerinin başlangıç bilgisidir.\n # Restful web servis isteklerini takip eden kodlar halinde bulacaksınız.\n 'https://api.ipara.com/', #BaseUrl\n 'https://www.ipara.com/3dgate', #ThreeDInquiryUrl\n #Test -> T, entegrasyon testlerinin sırasında \"T\" modunu,\n # canlı sisteme entegre olarak ödeme almaya başlamak için ise Prod -> \"P\" modunu kullanınız.\n 'T', #Mode\n '', #Echo\n # Kullandığınız iPara API versiyonudur.\n '1.0', #Version\n # Kullanacağınız hash bilgisini, bağlanmak istediğiniz web servis bilgisine göre doldurulmalıdır.\n # Bu bilgileri Entegrasyon rehberinin ilgili web servise ait bölümde bulabilirsiniz.\n '', #HashString\n '', #TransactionDate\n)\n\n# Ana Sayfamızda Ön Tanımlı Olarak 3D Ödeme Kısmı Gelmekte\ndef threeDPaymentRequest(request):\n message = \"\"\n if request.POST:\n req = ThreedPaymentRequest()\n req.OrderId = str(randint(1, 10000))\n req.Echo = \"Echo\"\n req.Mode = config.Mode\n req.Version = config.Version\n req.Amount = \"10000\"\n req.CardOwnerName = request.POST.get('nameSurname')\n req.CardNumber = request.POST.get('cardNumber')\n req.CardExpireMonth = request.POST.get('month')\n req.CardExpireYear = request.POST.get('year')\n req.Installment = request.POST.get('installment')\n req.Cvc = request.POST.get('cvc')\n req.ThreeD = \"true\"\n req.UserId = \"\"\n req.CardId = \"\"\n req.PurchaserName = \"Murat\"\n req.PurchaserSurname = \"Kaya\"\n req.PurchaserEmail = \"murat@kaya.com\"\n req.SuccessUrl = \"http://localhost:8000/threeDResultSuccess/\"\n req.FailUrl = \"http://localhost:8000/threeDResultFail/\"\n\n # 3D 
formunun 1. Adımının başlatılması için istek çağrısının yapıldığı kısımdır.\n message = req.execute(req, config)\n \n return render_to_response('index.html', {'message': message})\n\n# 3D Ödeme Sonucu Başarılı Olduğunda Çalışacak Kısım\ndef threeDResultSuccess(request):\n message = \"\"\n if request.POST:\n paymentResponse = ThreedInitResponse()\n paymentResponse.OrderId = request.POST.get('orderId')\n paymentResponse.Result = request.POST.get('result')\n paymentResponse.Amount = request.POST.get('amount')\n paymentResponse.Mode = request.POST.get('mode')\n\n if request.POST.get('errorCode') != \"\":\n paymentResponse.ErrorCode = request.POST.get('errorCode')\n if request.POST.get('transactionDate') != \"\":\n paymentResponse.TransactionDate = request.POST.get('transactionDate')\n if request.POST.get('hash') != \"\":\n paymentResponse.Hash = request.POST.get('hash')\n\n # Eğer İşlem 3D olarak Onaylandıysa\n # Sürecin İkinci Kısmını Çalıştırıyoruz\n helper = Helper()\n if helper.Validate3DReturn(paymentResponse, config):\n req = ThreedPaymentCompleteRequest()\n req.OrderId = request.POST.get('orderId')\n req.Echo = \"Echo\"\n req.Mode = config.Mode\n req.Amount = \"10000\"\n req.CardOwnerName = \"Fatih Coşkun\"\n req.CardNumber = \"4282209027132016\"\n req.CardExpireMonth = \"05\"\n req.CardExpireYear = \"18\"\n req.Installment = \"1\"\n req.Cvc = \"000\"\n req.ThreeD = \"true\"\n req.ThreeDSecureCode = request.POST.get('threeDSecureCode')\n req.UserId = \"\"\n req.CardId = \"\"\n\n # Sipariş veren bilgileri\n req.Purchaser = req.PurchaserClass()\n req.Purchaser.name = \"Murat\"\n req.Purchaser.surname = \"Kaya\"\n req.Purchaser.birthDate = \"1986-07-11\"\n req.Purchaser.email = \"murat@kaya.com\"\n req.Purchaser.gsmPhone = \"5881231212\"\n req.Purchaser.tcCertificate = \"1234567890\"\n req.Purchaser.clientIp = \"127.0.0.1\"\n\n # region Fatura bilgileri\n req.Purchaser.invoiceAddress = req.PurchaserAddress()\n req.Purchaser.invoiceAddress.name = \"Murat\"\n req.Purchaser.invoiceAddress.surname = \"Kaya\"\n req.Purchaser.invoiceAddress.address = \"Mevlüt Pehlivan Mah. Multinet Plaza Şişli\"\n req.Purchaser.invoiceAddress.zipCode = \"34782\"\n req.Purchaser.invoiceAddress.cityCode = \"34\"\n req.Purchaser.invoiceAddress.tcCertificate = \"1234567890\"\n req.Purchaser.invoiceAddress.country = \"TR\"\n req.Purchaser.invoiceAddress.taxNumber = \"123456\"\n req.Purchaser.invoiceAddress.taxOffice = \"Kozyatağı\"\n req.Purchaser.invoiceAddress.companyName = \"iPara\"\n req.Purchaser.invoiceAddress.phoneNumber = \"2122222222\"\n\n # region Kargo Adresi bilgileri\n req.Purchaser.shippingAddress = req.PurchaserAddress()\n req.Purchaser.shippingAddress.name = \"Murat\"\n req.Purchaser.shippingAddress.surname = \"Kaya\"\n req.Purchaser.shippingAddress.address = \"Mevlüt Pehlivan Mah. 
Multinet Plaza Şişli\"\n req.Purchaser.shippingAddress.zipCode = \"34782\"\n req.Purchaser.shippingAddress.cityCode = \"34\"\n req.Purchaser.shippingAddress.tcCertificate = \"1234567890\"\n req.Purchaser.shippingAddress.country = \"TR\"\n req.Purchaser.shippingAddress.phoneNumber = \"2122222222\"\n\n # Product details\n req.Products = []\n product1 = req.Product()\n product1.title = \"Telefon\"\n product1.code = \"TLF0001\"\n product1.price = \"5000\"\n product1.quantity = \"1\"\n req.Products.append(product1)\n\n product2 = req.Product()\n product2.title = \"Bilgisayar\"\n product2.code = \"BLG0001\"\n product2.price = \"5000\"\n product2.quantity = \"1\"\n req.Products.append(product2)\n\n config.BaseUrl = \"https://api.ipara.com/\"\n # This is where the request call of step 2 of the 3D form is made\n # to complete the payment.\n message = Helper.formatXML(req.execute(req, config))\n \n return render_to_response('threeDResultSuccess.html', {'message': message})\n\n\n# Runs when the 3D secure payment fails; the error message is shown to the end user.\ndef threeDResultFail(request):\n if request.content_params is not None:\n output = \"\"\n output += \"<response>\"\n if request.POST.get('echo') != \"\":\n output += \"<echo>\" + request.POST.get('echo') + \"</echo>\"\n if request.POST.get('result') != \"\":\n output += \"<result>\" + request.POST.get('result') + \"</result>\"\n if request.POST.get('amount') != \"\":\n output += \"<amount>\" + request.POST.get('amount') + \"</amount>\"\n if request.POST.get('publicKey') != \"\":\n output += \"<publicKey>\" + request.POST.get('publicKey') + \"</publicKey>\"\n if request.POST.get('orderId') != \"\":\n output += \"<orderId>\" + request.POST.get('orderId') + \"</orderId>\"\n if request.POST.get('mode') != \"\":\n output += \"<mode>\" + request.POST.get('mode') + \"</mode>\"\n if request.POST.get('errorCode') != \"\":\n output += \"<errorCode>\" + request.POST.get('errorCode') + \"</errorCode>\"\n if request.POST.get('errorMessage') != \"\":\n output += \"<errorMessage>\" + request.POST.get('errorMessage') + \"</errorMessage>\"\n output += \"</response>\"\n output = Helper.formatXML(output)\n return render_to_response('threeDResultFail.html', {'message': output})\n\n\n# Payment example without 3D secure\ndef apiPaymentRequest(request):\n message = \"\"\n if request.POST:\n non3DPaymentRequest = ApiPaymentRequest()\n non3DPaymentRequest.Echo = \"Echo\"\n non3DPaymentRequest.Mode = config.Mode\n non3DPaymentRequest.ThreeD = \"false\"\n non3DPaymentRequest.OrderId = str(randint(1, 10000))\n non3DPaymentRequest.Amount = \"10000\"\n non3DPaymentRequest.CardOwnerName = request.POST.get('nameSurname')\n non3DPaymentRequest.CardNumber = request.POST.get('cardNumber')\n non3DPaymentRequest.CardExpireMonth = request.POST.get('month')\n non3DPaymentRequest.CardExpireYear = request.POST.get('year')\n non3DPaymentRequest.Installment = request.POST.get('installment')\n non3DPaymentRequest.Cvc = request.POST.get('cvc')\n non3DPaymentRequest.VendorId = \"\"\n non3DPaymentRequest.UserId = \"123456\"\n non3DPaymentRequest.CardId = \"\"\n non3DPaymentRequest.ThreeDSecureCode = \"\"\n\n non3DPaymentRequest.Purchaser = non3DPaymentRequest.PurchaserClass()\n non3DPaymentRequest.Purchaser.name = \"Murat\"\n non3DPaymentRequest.Purchaser.surname = \"Kaya\"\n non3DPaymentRequest.Purchaser.birthDate = \"1986-07-11\"\n non3DPaymentRequest.Purchaser.email = \"mura@kaya.com\"\n non3DPaymentRequest.Purchaser.gsmPhone = \"5881231212\"\n non3DPaymentRequest.Purchaser.tcCertificate = \"58812312547\"\n non3DPaymentRequest.Purchaser.clientIp = \"127.0.0.1\"\n\n # Invoice details\n non3DPaymentRequest.Purchaser.invoiceAddress = non3DPaymentRequest.PurchaserAddress()\n non3DPaymentRequest.Purchaser.invoiceAddress.name = \"Murat\"\n non3DPaymentRequest.Purchaser.invoiceAddress.surname = \"Kaya\"\n non3DPaymentRequest.Purchaser.invoiceAddress.address = \"Mevlüt Pehlivan Mah. Multinet Plaza Şişli\"\n non3DPaymentRequest.Purchaser.invoiceAddress.zipCode = \"34782\"\n non3DPaymentRequest.Purchaser.invoiceAddress.cityCode = \"34\"\n non3DPaymentRequest.Purchaser.invoiceAddress.tcCertificate = \"1234567890\"\n non3DPaymentRequest.Purchaser.invoiceAddress.country = \"TR\"\n non3DPaymentRequest.Purchaser.invoiceAddress.taxNumber = \"123456\"\n non3DPaymentRequest.Purchaser.invoiceAddress.taxOffice = \"Kozyatagi\"\n non3DPaymentRequest.Purchaser.invoiceAddress.companyName = \"iPara\"\n non3DPaymentRequest.Purchaser.invoiceAddress.phoneNumber = \"2122222222\"\n\n # Shipping details\n non3DPaymentRequest.Purchaser.shippingAddress = non3DPaymentRequest.PurchaserAddress()\n non3DPaymentRequest.Purchaser.shippingAddress.name = \"Murat\"\n non3DPaymentRequest.Purchaser.shippingAddress.surname = \"Kaya\"\n non3DPaymentRequest.Purchaser.shippingAddress.address = \"Mevlüt Pehlivan Mah. Multinet Plaza Şişli\"\n non3DPaymentRequest.Purchaser.shippingAddress.zipCode = \"34782\"\n non3DPaymentRequest.Purchaser.shippingAddress.cityCode = \"34\"\n non3DPaymentRequest.Purchaser.shippingAddress.tcCertificate = \"1234567890\"\n non3DPaymentRequest.Purchaser.shippingAddress.country = \"TR\"\n non3DPaymentRequest.Purchaser.shippingAddress.phoneNumber = \"2122222222\"\n\n # Product details\n non3DPaymentRequest.Products = []\n product1 = non3DPaymentRequest.Product()\n product1.title = \"Telefon\"\n product1.code = \"TLF0001\"\n product1.price = \"5000\"\n product1.quantity = \"1\"\n non3DPaymentRequest.Products.append(product1)\n\n product2 = non3DPaymentRequest.Product()\n product2.title = \"Bilgisayar\"\n product2.code = \"BLG0001\"\n product2.price = \"5000\"\n product2.quantity = \"1\"\n non3DPaymentRequest.Products.append(product2)\n\n # Make the API call\n message = Helper.formatXML(non3DPaymentRequest.execute(non3DPaymentRequest, config))\n \n return render_to_response('apiPayment.html', {'message': message})\n\n\ndef paymentInquryRequest(request):\n message = \"\"\n if request.POST:\n req = PaymentInquiryRequest()\n req.orderId = request.POST.get('orderId')\n\n # This is where the payment inquiry service API call is made.\n message = Helper.formatXML(req.execute(req, config))\n\n return render_to_response('paymentInqury.html', {'message': message})\n\n
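# The product boilerplate repeated in the views above and below can be factored out.\n# Hypothetical helper (a sketch, not used by these views; it assumes only that the\n# request object exposes the nested Product class shown above):\ndef build_products(req, items):\n products = []\n for title, code, price, quantity in items:\n product = req.Product()\n product.title = title\n product.code = code\n product.price = price\n product.quantity = quantity\n products.append(product)\n return products\n\n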
= \"Kaya\"\n req.Purchaser.invoiceAddress.address = \"Mevlüt Pehlivan Mah. Multinet Plaza Şişli\"\n req.Purchaser.invoiceAddress.zipCode = \"34782\"\n req.Purchaser.invoiceAddress.cityCode = \"34\"\n req.Purchaser.invoiceAddress.tcCertificate = \"1234567890\"\n req.Purchaser.invoiceAddress.country = \"TR\"\n req.Purchaser.invoiceAddress.phoneNumber = \"2122222222\"\n\n # Kargo adresi bilgileri\n req.Purchaser.shippingAddress = req.PurchaserAddress()\n req.Purchaser.shippingAddress.name = \"Murat\"\n req.Purchaser.shippingAddress.surname = \"Kaya\"\n req.Purchaser.shippingAddress.address = \"Mevlüt Pehlivan Mah. Multinet Plaza Şişli\"\n req.Purchaser.shippingAddress.zipCode = \"34782\"\n req.Purchaser.shippingAddress.cityCode = \"34\"\n req.Purchaser.shippingAddress.tcCertificate = \"1234567890\"\n req.Purchaser.shippingAddress.country = \"TR\"\n req.Purchaser.shippingAddress.phoneNumber = \"2122222222\"\n\n # Ürün Bilgileri\n req.Products = []\n product1 = req.Product()\n product1.title = \"Telefon\"\n product1.code = \"TLF0001\"\n product1.price = \"5000\"\n product1.quantity = \"1\"\n req.Products.append(product1)\n\n product2 = req.Product()\n product2.title = \"Bilgisayar\"\n product2.code = \"BLG0001\"\n product2.price = \"5000\"\n product2.quantity = \"1\"\n req.Products.append(product2)\n\n # Cüzdandaki kart ile ödeme yapma API çağrısının yapıldığı kısımdır.\n message = Helper.formatXML(req.execute(req, config))\n\n return render_to_response('apiPaymentWithWallet.html', {'message': message})\n\n\n# Cüzdandaki Kartları Listelediğimiz Kısım\ndef getCardFromWallet(request):\n message = \"\"\n if request.POST:\n req = BankCardInquiryRequest()\n req.userId = request.POST.get('userId')\n req.cardId = request.POST.get('cardId')\n req.clientIp = \"127.0.0.1\"\n\n # Cüzdandan kartların getirildiği API cagrisini temsil etmektedir\n response = req.execute(req, config)\n message = json.dumps(json.loads(response), indent=4, ensure_ascii=False)\n\n return render_to_response('getCardFromWallet.html', {'message': message})\n\n\n# Cüzdana Kart Eklediğimiz Kısım\ndef addCartToWallet(request):\n message = \"\"\n if request.POST:\n req = BankCardCreateRequest()\n req.userId = request.POST.get('userId')\n req.cardOwnerName = request.POST.get('nameSurname')\n req.cardNumber = request.POST.get('cardNumber')\n req.cardAlias = request.POST.get('cardAlias')\n req.cardExpireMonth = request.POST.get('month')\n req.cardExpireYear = request.POST.get('year')\n req.clientIp = \"127.0.0.1\"\n\n # Cüzdana kart eklemek için yapılan API cagrisini temsil etmektedir\n response = req.execute(req, config)\n message = json.dumps(json.loads(response), indent=4, ensure_ascii=False)\n\n return render_to_response('addCartToWallet.html', {'message': message})\n\n\n# Cüzdandan Kart Sildiğimiz Kısım\ndef deleteCardFromWallet(request):\n message = \"\"\n if request.POST:\n req = BankCardDeleteRequest()\n req.userId = request.POST.get('userId')\n req.cardId = request.POST.get('cardId')\n req.clientIp = \"127.0.0.1\"\n\n # Cüzdanda bulunan karti silmek için yapılan API cagrisini temsil etmektedir\n response = req.execute(req, config)\n message = json.dumps(json.loads(response), indent=4, ensure_ascii=False)\n\n return render_to_response('deleteCardFromWallet.html', {'message': message})\n\n\n# Bin İsteği Yaptığımız Kısım\ndef binRequest(request):\n message = \"\"\n if request.POST:\n req = BinNumberRequest()\n req.binNumber = request.POST.get('binNumber')\n\n # Bin istegi icin yapılan API cagrisini temsil etmektedir\n response = 
req.execute(req, config)\n message = json.dumps(json.loads(response), indent=4, ensure_ascii=False)\n\n return render_to_response('bininqury.html', {'message': message})\n", "sub_path": "main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 18461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "main.ipara_lib.configs.Configs", "line_number": 18, "usage_type": "call"}, {"api_name": "main.ipara_lib.ThreedPaymentRequest.ThreedPaymentRequest", "line_number": 45, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 69, "usage_type": "call"}, {"api_name": "main.ipara_lib.ThreedInitResponse.ThreedInitResponse", "line_number": 75, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 90, "usage_type": "call"}, {"api_name": "main.ipara_lib.ThreedPaymentCompleteRequest.ThreedPaymentCompleteRequest", "line_number": 92, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper.formatXML", "line_number": 162, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 162, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 164, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper.formatXML", "line_number": 189, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 190, "usage_type": "call"}, {"api_name": "main.ipara_lib.ApiPaymentRequest.ApiPaymentRequest", "line_number": 197, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 201, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper.formatXML", "line_number": 265, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 265, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 267, "usage_type": "call"}, {"api_name": "main.ipara_lib.PaymentInquiryRequest.PaymentInquiryRequest", "line_number": 273, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper.formatXML", "line_number": 277, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 277, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 279, "usage_type": "call"}, {"api_name": "main.ipara_lib.ApiPaymentRequest.ApiPaymentRequest", "line_number": 286, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 287, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper.formatXML", "line_number": 350, "usage_type": "call"}, {"api_name": "main.ipara_lib.Helper.Helper", "line_number": 350, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 352, "usage_type": "call"}, {"api_name": "main.ipara_lib.BankCardInquiryRequest.BankCardInquiryRequest", "line_number": 359, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 366, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 366, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 368, "usage_type": "call"}, {"api_name": "main.ipara_lib.BankCardCreateRequest.BankCardCreateRequest", "line_number": 375, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 386, "usage_type": "call"}, {"api_name": "json.loads", 
"line_number": 386, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 388, "usage_type": "call"}, {"api_name": "main.ipara_lib.BankCardDeleteRequest.BankCardDeleteRequest", "line_number": 395, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 402, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 402, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 404, "usage_type": "call"}, {"api_name": "main.ipara_lib.BinNumberRequest.BinNumberRequest", "line_number": 411, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 416, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 416, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 418, "usage_type": "call"}]} +{"seq_id": "161670520", "text": "from config import database, helpers, db_context\nimport datetime\nimport base\nimport threading\nhelpers.extent_model(\n \"HCSLS_EmployeeType\",\n \"base\",\n [[\"emp_type_code\"]],\n emp_type_code=(\"text\", True),\n emp_type_name=(\"text\", True),\n emp_type_name2=(\"text\"),\n rate_main_sal=(\"numeric\"),\n rate_soft_sal=(\"numeric\"),\n true_type=(\"numeric\", True),\n # probation_time_by=(\"numeric\"),\n # probation_time=(\"text\"),\n # is_fix=(\"numeric\"),\n # coeff=(\"text\"),\n # begin_date_cal=(\"numeric\"),\n ordinal=(\"numeric\"),\n note=(\"text\"),\n lock=(\"bool\"),\n created_on=(\"date\"),\n created_by=(\"text\"),\n modified_on=(\"date\"),\n modified_by=(\"text\")\n #department_code=(\"text\")\n )\ndef HCSLS_EmployeeType():\n ret = db_context.collection(\"HCSLS_EmployeeType\")\n return ret", "sub_path": "apps/performance/api/models/HCSLS_EmployeeType.py", "file_name": "HCSLS_EmployeeType.py", "file_ext": "py", "file_size_in_byte": 995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "config.helpers.extent_model", "line_number": 5, "usage_type": "call"}, {"api_name": "config.helpers", "line_number": 5, "usage_type": "name"}, {"api_name": "config.db_context.collection", "line_number": 30, "usage_type": "call"}, {"api_name": "config.db_context", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "507341869", "text": "from PySide2 import QtWidgets\nfrom PySide2.QtGui import QPixmap\nfrom PySide2.QtWidgets import QLabel\n\nfrom node_launcher.assets.asset_access import AssetAccess\n\n\nclass LaunchWidget(QtWidgets.QWidget):\n def __init__(self, node_launcher, asset_access=AssetAccess()):\n super().__init__()\n self.node_launcher = node_launcher\n\n self.launchTestnetBitcoinQtNodeButton = QtWidgets.QPushButton('Bitcoin')\n self.launchTestnetLndNodeButton = QtWidgets.QPushButton('LND')\n self.launchMainnetBitcoinQtNodeButton = QtWidgets.QPushButton('Bitcoin')\n self.launchMainnetLndNodeButton = QtWidgets.QPushButton('LND')\n\n self.grid = QtWidgets.QGridLayout()\n\n testnet_layout = QtWidgets.QVBoxLayout()\n testnet_image = QLabel(self)\n testnet_pixmap = QPixmap(\n asset_access.get_asset_full_path('bitcoin-testnet.png'))\n testnet_image.setPixmap(testnet_pixmap)\n testnet_layout.addWidget(testnet_image)\n testnet_layout.addWidget(self.launchTestnetBitcoinQtNodeButton)\n testnet_layout.addWidget(self.launchTestnetLndNodeButton)\n testnet_layout.addStretch(1)\n testnet_group_box = QtWidgets.QGroupBox('testnet')\n testnet_group_box.setLayout(testnet_layout)\n self.grid.addWidget(testnet_group_box, 1, 1)\n\n mainnet_layout = QtWidgets.QVBoxLayout()\n 
mainnet_image = QLabel(self)\n mainnet_pixmap = QPixmap(\n asset_access.get_asset_full_path('bitcoin.png'))\n mainnet_image.setPixmap(mainnet_pixmap)\n mainnet_layout.addWidget(mainnet_image)\n mainnet_layout.addWidget(self.launchMainnetBitcoinQtNodeButton)\n mainnet_layout.addWidget(self.launchMainnetLndNodeButton)\n mainnet_layout.addStretch(1)\n mainnet_group_box = QtWidgets.QGroupBox('mainnet')\n mainnet_group_box.setLayout(mainnet_layout)\n self.grid.addWidget(mainnet_group_box, 1, 2)\n\n self.setLayout(self.grid)\n\n self.launchTestnetBitcoinQtNodeButton.clicked.connect(\n self.node_launcher.launchTestnetBitcoinQtNode)\n self.launchTestnetLndNodeButton.clicked.connect(\n self.node_launcher.launchTestnetLndNode)\n self.launchMainnetBitcoinQtNodeButton.clicked.connect(\n self.node_launcher.launchMainnetBitcoinQtNode)\n self.launchMainnetLndNodeButton.clicked.connect(\n self.node_launcher.launchMainnetLndNode)\n", "sub_path": "node_launcher/gui/launch_widget.py", "file_name": "launch_widget.py", "file_ext": "py", "file_size_in_byte": 2430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "PySide2.QtWidgets.QWidget", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "node_launcher.assets.asset_access.AssetAccess", "line_number": 9, "usage_type": "call"}, {"api_name": "node_launcher.assets.asset_access", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 13, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 13, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 14, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 14, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 15, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 15, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 16, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 16, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QGridLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 18, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 20, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 20, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 21, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 22, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QGroupBox", "line_number": 29, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 29, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 33, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 33, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 34, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 35, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QGroupBox", "line_number": 42, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "573839296", "text": "from hashlib import sha1\nimport json\nimport re\nimport time\n\nfrom tower import ugettext as _\n\nfrom django.conf import settings\n\nfrom kitsune.offline.index import (\n TFIDFIndex,\n 
find_word_locations_with_spaces,\n    find_word_locations_without_spaces\n)\nfrom kitsune.wiki.config import TROUBLESHOOTING_CATEGORY, HOW_TO_CATEGORY\nfrom kitsune.wiki.models import Document\n\n\n_noscript_regex = re.compile(r'<noscript>(.*?)</noscript>', flags=re.DOTALL)\n\n\ndef bundle_key(locale, product_slug):\n    \"\"\"The key for a bundle as stored in the client side's indexeddb.\n\n    The arguments to this function must be strings. This key is used\n    for the index.\n    \"\"\"\n    return locale + '~' + product_slug\n\n\ndef doc_key(locale, doc_slug):\n    \"\"\"The key for a document as stored in the client side's indexeddb.\n\n    The arguments to this function must be strings.\n    \"\"\"\n    return locale + '~' + doc_slug\n\n\ndef topic_key(locale, product_slug, topic_slug):\n    \"\"\"The key for a topic as stored in the client side's indexeddb.\n\n    The arguments to this function must be strings.\n    \"\"\"\n    return locale + '~' + product_slug + '~' + topic_slug\n\n\ndef redis_bundle_name(locale, product_slug):\n    return 'osumo:' + bundle_key(locale.lower(), product_slug.lower())\n\n\ndef transform_html(dochtml):\n    \"\"\"Transforms the html into something we want to serve in the app.\n\n    Do things to the document html such as stripping out things the\n    offline app does not need. We could also do this in WikiParser,\n    but this is probably easier for now.\n    \"\"\"\n    # Strip out all the