diff --git "a/1236.jsonl" "b/1236.jsonl" new file mode 100644--- /dev/null +++ "b/1236.jsonl" @@ -0,0 +1,453 @@ +{"seq_id": "418913376", "text": "import numpy as np\r\nfrom keras.models import Model \r\nfrom keras.layers import Dense, Input\r\nfrom keras.utils import np_utils\r\nfrom keras.datasets import mnist\r\n\r\nx = Input(shape=(784,), dtype=\"float32\")\r\nNN = Dense(512, activation=\"sigmoid\")(x)\r\nNN = Dense(10, activation=\"softmax\")(x)\r\n\r\nmodel = Model(inputs=[x], outputs=[NN])\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", loss_weights=[0.5], metrics=[\"accuracy\"])\r\n\r\n(train_x, train_y), (test_x, test_y) = mnist.load_data()\r\ntrain_x = train_x.reshape(60000, 784) / 255\r\ntest_x = test_x.reshape(10000, 784) / 255\r\nprint(len(train_x))\r\ntrain_y = np_utils.to_categorical(train_y)\r\ntest_y = np_utils.to_categorical(test_y)\r\n# from _CreateBMP import CreateBMP\r\n# CreateBMP(train_x[0], train_y[0])\r\n# print(train_x[0], train_y[0])\r\n\r\nmodel.fit(x=[train_x], y=[train_y], epochs=30)\r\nscore = model.evaluate(x=[test_x], y=[test_y])\r\nprint(\"Loss:%f, Accuracy:%f\" % (score[0], score[1]))\r\n\r\nmodel.save_weights(\"mnist.h5\")\r\nwith open(\"mnist.json\", \"w\") as f:\r\n f.write(model.to_json())", "sub_path": "ml/MNIST/FNN/krs_mnist.py", "file_name": "krs_mnist.py", "file_ext": "py", "file_size_in_byte": 1049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "keras.layers.Input", "line_number": 7, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 9, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.datasets.mnist.load_data", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 14, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 18, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "429298202", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on 2017/4/14\n\n@author: ybwang\n\"\"\"\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom sklearn.metrics import roc_curve, roc_auc_score\nimport matplotlib.pyplot as plt\n\n\ndef rosen(x):\n return sum(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0)\n\n\ndef loss(w, data):\n x = data[:, :-1]\n y = data[:, -1]\n w = np.array([w])\n p = (1.0 / (1.0 + np.exp(-x.dot(w.T)))).flatten()\n mle = -np.sum(y * np.log(p + 1e-9) + (1 - y) * np.log(1 - p + 1e-9))\n return mle\n\n\ndef readTSV(tsvfile, skipcolumn=True, skiprow=True):\n data = []\n n = 0\n with open(tsvfile, 'r') as f:\n for line in f:\n if n == 0 and skiprow == True:\n n += 1\n continue\n ele = line.rstrip().split('\\t')\n if skipcolumn == True: ele = ele[1::]\n data.append(ele)\n return np.float64(np.array(data))\n\n\ndata = readTSV('E:/Pycharm/code/hp/dataPLS.txt')\n\n# loss(np.array([np.repeat(1, 62)]), np.hstack((np.ones((data.shape[0], 1)), data)))\n\nx0 = np.repeat(0, 62).tolist()\nd = np.hstack((np.ones((data.shape[0], 1)), data))\n# print loss(x0, d)\nres = minimize(loss, x0, args=(d,), method='BFGS')\n\nw = np.array([res.x] ).reshape((-1,1))\np = 1 / (1 
+ np.exp(-d[:,:-1].dot(w)))\nauc = roc_auc_score(d[:,-1].flatten(), p.flatten())\nfpr, tpr, threshold = roc_curve(d[:,-1].flatten(), p.flatten(),pos_label=1)\nplt.plot(fpr,tpr,'r-')\nplt.legend(['AUC = ' +str(auc)], loc=4)\nplt.show()\n", "sub_path": "optim.py", "file_name": "optim.py", "file_ext": "py", "file_size_in_byte": 1490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "288315529", "text": "import argparse\nimport matplotlib.pyplot as plt\nimport itertools\nfrom wordcloud import WordCloud\nfrom VectReco.R2VModel import R2VModel\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n\",default=30, type=int)\nparser.add_argument(\"model\", type=str)\nparser.add_argument(\"word\",type=str)\nargs = parser.parse_args()\n\n\nmod = R2VModel.from_w2v_text(args.model,binary=True)\n\nwords = mod.most_similar(vect=mod[args.word],limit=\"words\",topn=args.n)\n\nfreq = [(word,round(((((sim+1)/2)/1)*100))) for word,sim in words]\n\nduped = [[w]*int(f) for w,f in freq]\ntext = \" \".join(itertools.chain.from_iterable(duped))\n\n# text = \" \".join([w for w,s in words])\nwordcloud = WordCloud(width=200, height=200, margin=2, ranks_only=False, prefer_horizontal=0.9, mask=None, scale=1, max_words=200, background_color='white').generate(text)\n# Open a plot of the generated image.\nplt.imshow(wordcloud)\nplt.axis(\"off\")\nplt.show()", "sub_path": "generate_wordcloud.py", "file_name": "generate_wordcloud.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "VectReco.R2VModel.R2VModel.from_w2v_text", "line_number": 14, "usage_type": "call"}, {"api_name": "VectReco.R2VModel.R2VModel", "line_number": 14, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 21, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 
21, "usage_type": "attribute"}, {"api_name": "wordcloud.WordCloud", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "363105806", "text": "# This Python file uses the following encoding: utf-8\n# do not remove the above comment as it is actually special!\n\n# built to test the issues in https://code.google.com/p/googleappengine/issues/detail?id=7981\n\nimport webapp2\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\nfrom google.appengine.api import search\n\nimport json\n\nproblem_strings = [u\"あああ123\", u\"あああ\", u\"(~ ̄▽ ̄)~\", u\" ̄▽ ̄\"]\n\n_INDEX_NAME = 'japaneseSpecialChars'\n\nclass MainPage(webapp2.RequestHandler):\n \n \n def get(self):\n self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'\n # build the index and documents\n for ps in problem_strings:\n problem_doc = search.Document(\n fields=[search.TextField(name='author', value=\"anon\"),\n search.TextField(name='content', value=ps)])\n search.Index(name=_INDEX_NAME).put(problem_doc)\n # query the index and documents\n for ps in problem_strings:\n self.response.out.write (\"searching for \\\"\" + ps + \"\\\":\\n\")\n try:\n query_obj = search.Query(query_string=ps)\n results = search.Index(name=_INDEX_NAME).search(query=query_obj)\n # if we get here, there was no exception\n self.response.out.write (\"query succeeded \\n\")\n except: \n self.response.out.write (\"Failed to parse query \\\"\" + ps + \"\\\"\\n\")\n self.response.out.write (\"\\n\")\n\n\napplication = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "japaneseUnicodeSearch.py", "file_name": "japaneseUnicodeSearch.py", "file_ext": "py", "file_size_in_byte": 1653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "webapp2.RequestHandler", "line_number": 17, "usage_type": "attribute"}, {"api_name": "google.appengine.api.search.Document", "line_number": 24, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 24, "usage_type": "name"}, {"api_name": "google.appengine.api.search.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 25, "usage_type": "name"}, {"api_name": "google.appengine.api.search.TextField", "line_number": 26, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 26, "usage_type": "name"}, {"api_name": "google.appengine.api.search.Index", "line_number": 27, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 27, "usage_type": "name"}, {"api_name": "google.appengine.api.search.Query", "line_number": 32, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 32, "usage_type": "name"}, {"api_name": "google.appengine.api.search.Index", "line_number": 33, "usage_type": "call"}, {"api_name": "google.appengine.api.search", "line_number": 33, "usage_type": "name"}, {"api_name": "webapp2.WSGIApplication", 
"line_number": 41, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "213954077", "text": "from os import path\n\nfrom setuptools import setup\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"picpay-python\",\n version=\"0.4.2\",\n description=\"Aceite PicPay e faça parte do movimento que está revolucionando a relação com o dinheiro no Brasil.\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n url=\"https://github.com/hudsonbrendon/picpay-python\",\n author=\"Hudson Brendon\",\n author_email=\"contato.hudsonbrendon@gmail.com\",\n license=\"MIT\",\n packages=[\"picpay\"],\n install_requires=[\n \"requests\",\n ],\n zip_safe=False,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "621887670", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass Actor(models.Model):\n\n GENDER = (\n ('male', 'Male'),\n ('female', 'Female'),\n ('other', 'Other')\n )\n actor_id = models.CharField(max_length=100, primary_key=True)\n name = models.CharField(max_length=100)\n actor_image = models.ImageField(upload_to='home/media/actors', height_field=None, width_field=None, max_length=None, null=True, blank=True) \n actor_discription = models.TextField(null=True)\n gender = models.CharField(max_length=50, choices=GENDER)\n fb_likes = models.IntegerField(null=True)\n date_of_birth = models.DateField(null=True)\n movies = models.ManyToManyField('Movie', through='Cast', blank=True)\n\n\n def __str__(self):\n return self.name\n\nclass Director(models.Model):\n GENDER = (\n ('male', 'Male'),\n ('female', 'Female'),\n ('other', 'Other')\n )\n name = models.CharField(max_length=100)\n director_image = models.ImageField(upload_to='home/media/directors', height_field=None, width_field=None, max_length=None, null=True, blank=True)\n director_discription = models.TextField(null=True)\n gender = models.CharField(max_length=50, choices=GENDER,null=True)\n no_of_facebook_likes = models.IntegerField(default=0)\n\n def __str__(self):\n return self.name\n\nclass Movie(models.Model):\n\n GENRES = (\n ('Action', 'Action'),\n ('Adventure', 'Adventure'),\n ('Animation', 'Animation'),\n ('Comedy', 'Comedy'),\n ('Documentary', 'Documentary'),\n ('Drama', 'Drama'),\n ('Family', 'Family'),\n ('Fantasy', 'Fantasy'),\n ('Horror', 'Horror'),\n ('Romance', 'Romance'),\n ('Sci-fi', 'Sci-fi'),\n ('Thriller', 'Thriller')\n \n )\n movie_id = models.CharField(max_length=500, primary_key=True)\n name = models.CharField(max_length=200)\n movie_poster = models.ImageField(upload_to='home/media/movies', height_field=None, width_field=None, max_length=None, null=True, blank=True)\n movie_discription = models.TextField(null=True)\n release_date = models.DateField((\"Released Date\"), 
auto_now=False, auto_now_add=False)\n imdb_link = models.CharField(max_length=1000)\n avg_rating = models.FloatField()\n budget = models.IntegerField(default=0)\n collections = models.FloatField()\n language = models.CharField(max_length=100, default='English')\n country = models.CharField(max_length=100, default='USA')\n likes_on_fb = models.IntegerField(default=0)\n genre = models.CharField(max_length=100,null=True, choices=GENRES)\n director = models.ForeignKey(Director, on_delete=models.CASCADE, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Cast(models.Model):\n actor = models.ForeignKey(Actor, on_delete=models.CASCADE)\n movie = models.ForeignKey(Movie, verbose_name=(\"Acted Movie\"), on_delete=models.CASCADE)\n role = models.CharField(max_length=50, null=True)\n is_debut_movie = models.BooleanField(default=False)\n\n def __str__(self):\n return self.actor.name + ' ' + self.movie.name + ' ' + self.role\n\n\n\n", "sub_path": "imdb_project/imdb/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.models.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "580750429", 
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass QuoteSpider(scrapy.Spider):\n name = 'quote'\n allowed_domains = ['quotes.toscrape.com']\n start_urls = ['http://quotes.toscrape.com/']\n\n def parse(self, response):\n response_next = response.css('.quote')\n print(response_next)\n for item in response_next:\n text = item.xpath('span[1]/text()').extract()\n author = item.xpath('span[2]/small/text()').extract()\n tags = item.xpath('div/a/text()').extract()\n # tags = item.xpath('div').re('a class=\"tag\" href=\".*?\">(.*?)')\n # tags = item.xpath('div[class=\"tag\"]/a/text').extract()\n print(tags)\n yield {\n 'text': text,\n 'author': author,\n 'tags': tags\n }\n\n next_url = response.xpath('/html/body/div/div[2]/div[1]/nav/ul/li[@class=\"next\"]/a/@href').extract()[0]\n url = 'http://quotes.toscrape.com'+next_url\n yield scrapy.Request(url, callback=self.parse)\n", "sub_path": "scrapy/quotes/quotes/scrapy/quote.py", "file_name": "quote.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "462825746", "text": "\"\"\"Hold functions to assist main.py.\"\"\"\r\nimport os\r\nimport json\r\nimport pickle\r\nimport time\r\nimport requests\r\nimport sql_helper\r\nimport pyfy\r\n\r\n\r\ndef add_track_to_user_library(user_id, track_id):\r\n \"\"\"Add track to user's library stored as a pkl.\"\"\"\r\n if not os.path.exists(\"var/libraries\"):\r\n os.mkdir(\"var/libraries\")\r\n if not os.path.exists(\"var/libraries/{0}_library.txt\".format(user_id)):\r\n with open(\"var/libraries/{0}_library.txt\".format(user_id), \"w+\") as f:\r\n f.write(\"{}\\n\".format(track_id))\r\n else:\r\n f_read = open(\"var/libraries/{0}_library.txt\".format(user_id), \"r\")\r\n songs = f_read.readlines()\r\n f_read.close()\r\n with open(\"var/libraries/{0}_library.txt\".format(user_id), \"a+\") as f:\r\n if \"{}\\n\".format(track_id) not in songs:\r\n f.write(\"{}\\n\".format(track_id))\r\n else:\r\n print(\"already in there\")\r\n\r\n\r\ndef get_user_tracks(in_spt, user_id):\r\n \"\"\"Get the tracks saved in a user's library.\"\"\"\r\n lim = 50\r\n tracks = in_spt.user_tracks(limit=lim)\r\n num_tracks = 0 \r\n\r\n to_continue = True\r\n\r\n while to_continue:\r\n if not tracks[\"next\"]:\r\n to_continue = False\r\n for track in tracks['items']:\r\n track_info = track['track']\r\n track_dict = {\r\n \"name\": track_info[\"name\"],\r\n \"popularity\": track_info[\"popularity\"],\r\n \"album_id\": track_info[\"album\"][\"id\"],\r\n \"id\": track_info[\"id\"],\r\n \"artist_names\": [],\r\n \"artist_ids\": []\r\n }\r\n\r\n if sql_helper.track_exists_in_db(track_dict[\"id\"]):\r\n add_track_to_user_library(user_id, track_dict[\"id\"])\r\n else:\r\n\r\n for artist in track_info[\"artists\"]:\r\n track_dict[\"artist_ids\"].append(artist[\"id\"])\r\n track_dict[\"artist_names\"].append(artist[\"name\"])\r\n\r\n # print(track_dict)\r\n\r\n for _ in range(10):\r\n try:\r\n track_features = in_spt.tracks_audio_features(track_dict[\"id\"])\r\n break\r\n except pyfy.excs.ApiError as e:\r\n print(\"SHIT\")\r\n if e.http_response.status_code == 429:\r\n time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n else:\r\n print(\"Unkown problem with response.\")\r\n exit(1)\r\n \r\n add_track_to_user_library(user_id, track_dict[\"id\"])\r\n\r\n for _ in 
range(10):\r\n try:\r\n track_dict.update(\r\n get_desired_album_info(\r\n in_spt.albums(\r\n [track_dict[\"album_id\"]])))\r\n break\r\n except pyfy.excs.ApiError as e:\r\n print(\"FUCK\")\r\n if e.http_response.status_code == 429:\r\n time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n else:\r\n print(\"Unkown problem with response.\")\r\n exit(1)\r\n\r\n sql_helper.add_track(track_dict[\"id\"], track_dict[\"name\"],\r\n track_dict[\"popularity\"], track_dict[\"album_id\"],\r\n track_dict[\"album_name\"], track_dict[\"album_popularity\"],\r\n json.dumps(track_dict[\"artist_ids\"]),\r\n json.dumps(track_dict[\"artist_names\"]),\r\n track_dict[\"release_date\"],\r\n track_dict[\"release_date_precision\"],\r\n track_features)\r\n num_tracks += lim\r\n for _ in range(10):\r\n try:\r\n tracks = in_spt.user_tracks(limit=lim, offset=num_tracks)\r\n break\r\n except pyfy.excs.ApiError as e:\r\n print(\"PISS\")\r\n if e.http_response.status_code == 429:\r\n time.sleep(int(e.http_response.headers['Retry-After']) * 2)\r\n else:\r\n print(\"Unkown problem with response.\")\r\n exit(1)\r\n print(\"Looked at roughly {} tracks\".format(num_tracks))\r\n return str(num_tracks)\r\n\r\n\r\ndef get_desired_album_info(album_response_json):\r\n \"\"\"Get only wanted features from an album.\"\"\"\r\n return {\"album_name\": album_response_json[\"name\"],\r\n \"album_popularity\": album_response_json[\"popularity\"],\r\n \"release_date\": album_response_json[\"release_date\"],\r\n \"release_date_precision\": album_response_json[\"release_date_precision\"],\r\n }\r\n", "sub_path": "api/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 4928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sql_helper.track_exists_in_db", "line_number": 51, "usage_type": "call"}, {"api_name": "pyfy.excs", "line_number": 65, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "pyfy.excs", "line_number": 82, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 85, "usage_type": "call"}, {"api_name": "sql_helper.add_track", "line_number": 90, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 94, "usage_type": "call"}, {"api_name": "pyfy.excs", "line_number": 103, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "167134612", "text": "## Load Import necessary dependencies\nimport numpy as np # linear algebra\nimport os # accessing directory structure\nimport pandas as pd # data processing, CSV file I/O\nimport matplotlib.pyplot as plt # plotting\nimport seaborn as sns\nfrom sklearn import preprocessing\n\n## Load and Read DataSets\ndf = pd.read_csv('train.csv', sep=',', na_values=['N/A', 'no', '?'])\n## Return the first n rows.\nprint(df.head(10)) # n= 10\n## method for prints information about a DataFrame including the index dtype and columns, non-null values and memory usage\ndf.info() # rows = 318438 , columns = 18\n## To Visualize Data\n### pairplot will 
plot pairwise relationships across an entire dataframe (for the numerical columns)\n### and supports a color hue argument (for categorical columns)\n# sns.pairplot(df)\n# plt.savefig('visualize.jpg')\n# plt.show()\n# sns.pairplot(df, hue='Stay', height=3, aspect=1.3) #Use hue to show graph based on the hue category values\n# plt.savefig('visualize_hue_stay.jpg')\n# plt.show()\n# sns.jointplot(x='Admission_Deposit', y='Stay', data=df, kind='scatter')\n# plt.savefig('visualize_Admission_deposit_with_stay.jpg')\n# plt.show()\n#### Feature Transformations\n## Check Missing Data\n### number of missing data in City_Code_Patient column = 4532,\n### number of missing data in Bed Grade = 113 , missing data is less, so drop rows which contain missing data\nprint(df.isnull().sum())\nprint(df.describe())\nprint(df['Age'].value_counts())\n## Work with Missing Data\n## Drop Missing Data\ndf = df.dropna(axis=0) # drop rows from a data set containing missing values\ndf.info()\n## Check Missing Data\nprint(df.isnull().sum())\n\ncolumns =['case_id', 'Hospital_type_code', 'Hospital_region_code', 'Ward_Facility_Code', 'patientid', 'City_Code_Patient']\ndf = df.drop(columns, axis=1)\ndf.info()\n## # Converting float64 to int type\ndf['Bed Grade'] = df['Bed Grade'].astype(np.int64)\ndf['Admission_Deposit'] = df['Admission_Deposit'].astype(np.int64)\n\ndf.info()\n## Work with Categorical Data ## columns [Department, Ward_Type, Type of Admission, Severity of Illness, Age, Stay ]\n\ndf = pd.get_dummies(df, columns=['Department', 'Ward_Type', 'Type of Admission', 'Severity of Illness', 'Age'], drop_first=True)\nprint(df.columns)\ndf.info() # (total 29 columns), 313793 entries (rows)\nle = preprocessing.LabelEncoder() # to convert Y from categorical to label encoder\ndf['Stay'] = le.fit_transform(df['Stay'])\ndf['Stay'] = df['Stay'].astype(int)\nprint(list(le.classes_)) #['0-10', '11-20', '21-30', '31-40', '41-50', '51-60', '61-70', '71-80', '81-90', '91-100', 'More than 100 Days']\n#print(list(le.inverse_transform([0, 1, 2,3, 4, 5, 6, 7, 8, 9, 10])))\n\n## Detect and Handle Outliers\ncolumns =['Hospital_code','City_Code_Hospital', 'Available Extra Rooms in Hospital', 'Bed Grade', 'Visitors with Patient', 'Admission_Deposit']\n# for col in columns: # to show outliers\n# sns.boxplot(x=col, data=df)\n# sns.stripplot(x=col, data=df, color=\"#474646\")\n# plt.show()\n\nfrom datasist.structdata import detect_outliers\noutliers_indices = detect_outliers(df, 0, columns)\nprint(len(outliers_indices))\n# handle outliers\ndf.drop(outliers_indices, inplace=True)\ndf.info()\n\n### Deal with Imbalanced classes ## Stay column\nprint(df['Stay'].value_counts())\nfrom sklearn.model_selection import train_test_split\nx = df.drop('Stay', axis=1)\ny = df['Stay']\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=22)\nfrom imblearn.over_sampling import SMOTE\nsmote = SMOTE(random_state=22)\n# make smote in only training set\nx_train, y_train = smote.fit_sample(x_train, y_train)\nprint(y_train.value_counts())\nprint(x_train.shape)\n## features scaling\nfrom sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler\nscaling = StandardScaler()\nx_train = scaling.fit_transform(x_train)\nx_test = scaling.transform(x_test)\n#### Train model\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier()\nmodel.fit(x_train, y_train)\n## Evaluate Model\ny_pred = model.predict(x_test)\nfrom sklearn.metrics import classification_report, accuracy_score, 
confusion_matrix\nprint(accuracy_score(y_test, y_pred))\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n## in case used cross Validation\n# from sklearn.model_selection import cross_validate, cross_val_predict, cross_val_score\n# from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n#\n# cv_result = cross_validate(model, x_train, y_train, cv=10, return_train_score=True)\n# print(cv_result)\n# print(cv_result['test_score'].mean())\n# Y_predict = cross_val_predict(model, x_test, y_test, cv=10)\n# print(accuracy_score(Y_predict, y_test))\n\n\n", "sub_path": "Healthcare Analytics/project/random_forest.py", "file_name": "random_forest.py", "file_ext": "py", "file_size_in_byte": 4765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 54, "usage_type": "name"}, {"api_name": "datasist.structdata.detect_outliers", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 79, "usage_type": "call"}, {"api_name": "imblearn.over_sampling.SMOTE", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "393224801", "text": "from flask import Flask, render_template,request,session,redirect\nfrom flask_sqlalchemy import SQLAlchemy \nfrom flask_mail import Mail \nimport json\nfrom datetime import datetime\n\n\nwith open('config.json','r') as c:\n\tparams=json.load(c)[\"params\"]\n\nlocal_server=True\napp=Flask(__name__)\napp.secret_key=\"super-secret-key\"\napp.config.update(\n\tMAIL_SERVER='smtp.gmail.com',\n\tMAIL_PORT='465',\n\tMAIL_USE_SSL=True,\n\tMAIL_USERNAME=params['gmail-user'],\n\tMAIL_PASSWORD=params['gmail-password']\n\n)\nmail=Mail(app)\n\nif local_server:\n\tapp.config['SQLALCHEMY_DATABASE_URI']=params['local_uri']\nelse:\n\tapp.config['SQLALCHEMY_DATABASE_URI']=params['prod_uri']\ndb=SQLAlchemy(app)\n\nclass Contacts(db.Model):\n\n sno = db.Column(db.Integer, primary_key=True)\n Name = db.Column(db.String(80), nullable=False)\n Phone_no = db.Column(db.String(12), nullable=False)\n messages = db.Column(db.String(120), nullable=False)\n Date = db.Column(db.String(12),nullable=True)\n Email = db.Column(db.String(20), nullable=False)\n\nclass Posts(db.Model):\n\n sno = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n slug = db.Column(db.String(21), nullable=False)\n content = db.Column(db.String(120), nullable=False)\n tagline = db.Column(db.String(120), nullable=False)\n date = db.Column(db.String(12),nullable=True)\n img_file = db.Column(db.String(12),nullable=True)\n 
\n \n\n@app.route('/')\ndef home():\n\tposts=Posts.query.filter_by().all()[0:params['no_of_posts']]\n\treturn render_template('index.html',params=params,posts=posts)\n\n@app.route('/about')\ndef about():\n\treturn render_template('about.html',params=params)\n\n@app.route('/dashboard',methods=['GET','POST'])\ndef dashboard():\n\tif 'user' in session and session['user']==params['admin_user']:\n\t\tposts=Posts.query.all()\n\t\treturn render_template('dashboard.html',params=params,posts=posts)\n\t\t\n\tif request.method==\"POST\":\n\t\tusername=request.form.get('uname')\n\t\tuserpass=request.form.get('pass')\n\t\tif username==params['admin_user'] and userpass==params['admin_password']:\n\t\t\t#set the session variable\n\t\t\tsession['user']=username\n\t\t\tposts=Posts.query.all()\n\t\t\treturn render_template('dashboard.html',params=params,posts=posts)\n\t\t\t\n\treturn render_template('login.html',params=params)\n\n@app.route('/post/',methods=['GET'])\ndef post_route(post_slug):\n\tpost=Posts.query.filter_by(slug=post_slug).first()\n\treturn render_template('post.html',params=params,post=post)\n\n@app.route(\"/edit/\",methods=['GET','POST'])\ndef edit(sno):\n\tif 'user' in session and session['user']==params['admin_user']:\n\t\tif request.method==\"POST\":\n\t\t\tbox_title=request.form.get('title')\n\t\t\ttline=request.form.get('tline')\n\t\t\tslug=request.form.get('slug')\n\t\t\tcontent=request.form.get('content')\n\t\t\timg_file=request.form.get('img_file')\n\t\t\tdate=datetime.now()\n\n\t\t\tif sno=='0':\n\t\t\t\tpost=Posts(title=box_title,slug=slug,content=content,tagline=tline,img_file=img_file,date=date)\n\t\t\t\tdb.session.add(post)\n\t\t\t\tdb.session.commit()\n\t\t\telse:\n\t\t\t\tpost=Posts.query.filter_by(sno=sno).first()\n\t\t\t\tpost.title=box_title\n\t\t\t\tpost.slug=slug\n\t\t\t\tpost.content=content\n\t\t\t\tpost.tagline=tline\n\t\t\t\tpost.img_file=img_file\n\t\t\t\tpost.date=date\n\t\t\t\tdb.session.commit()\n\t\t\t\treturn redirect('/edit/'+sno)\n\t\tpost=Posts.query.filter_by(sno=sno).first()\n\t\treturn render_template('edit.html', params=params,post=post)\n\n\n\n@app.route('/contact',methods=['GET','POST'])\ndef contact():\n\tif request.method=='POST':\n\n\t\t'''add entry to the db.'''\n\t\tname=request.form.get('name')\n\t\temail=request.form.get('email')\n\t\tphone=request.form.get('phone')\n\t\tmessage=request.form.get('message')\n\t\t\n\t\tentry=Contacts(Name=name,Phone_no=phone,messages=message,Date=datetime.now(),Email=email)\n\t\tdb.session.add(entry)\n\t\tdb.session.commit()\n\t\tmail.send_message('New Message from'+name,\n\t\t\tsender=email,\n\t\t\trecipients=[params['gmail-user']],\n\t\t\tbody=message+'\\n'+phone\n\t\t\t)\n\n\n\treturn render_template('contact.html',params=params)\n\n\n\napp.run(debug=True)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_mail.Mail", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 62, "usage_type": "name"}, {"api_name": 
"flask.render_template", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", 
"line_number": 119, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "461635217", "text": "# 'matplotlib' is a python plotting library which gives wide variety of plotting methods,\n# highly used with OpenCV for different kinds of graphs of images\n# graphs are generally used to analyse the images \n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport cv2 as cv\n\nimg = cv.imread('used_images_videos/lena.jpg', -1)\ncv.imshow('image', img)\nimg = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n\nplt.imshow(img) # takes images in rbg format\nplt.xticks([]), plt.yticks([]) \nplt.show()\n\ncv.waitKey(0)\ncv.destroyAllWindows()\n", "sub_path": "Practice/opencv/matplotlib_use.py", "file_name": "matplotlib_use.py", "file_ext": "py", "file_size_in_byte": 530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.waitKey", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "36509247", "text": "import cv2 as cv\nfrom mchqr.image import Image\nfrom mchqr.solution import AlgoPair, Detected, DetectedList\nimport numpy as np\nfrom pyzbar.pyzbar import decode, Decoded, ZBarSymbol\nfrom typing import Callable, List\n\nDecodedList = List[Decoded]\nDetector = Callable[[Image], AlgoPair]\n\ndef algo_pair(image: Image, detected_list: DetectedList):\n\treturn AlgoPair(image.name, detected_list)\n\ndef detector(function: Detector):\n\tdetectors[function.__name__] = function\n\treturn function\n\ndetectors = {}\n\n@detector\ndef cv_detector(image: Image):\n\t_, data_list, bounding_boxes, _ = cv.QRCodeDetector().detectAndDecodeMulti(image.matrix)\n\n\treturn algo_pair(\n\t\timage, [\n\t\t\tDetected(\n\t\t\t\tdata_list[i],\n\t\t\t\tnp.int32(\n\t\t\t\t\tbounding_boxes[i]\n\t\t\t\t)\n\t\t\t)\n\t\t\tfor i in range(\n\t\t\t\tlen(data_list)\n\t\t\t)\n\t\t]\n\t)\n\n@detector\ndef zbar(image: Image):\n\treturn algo_pair(\n\t\timage, [\n\t\t\tDetected(\n\t\t\t\tdecoded.data.decode('utf-8'),\n\t\t\t\tnp.array(decoded.polygon)\n\t\t\t)\n\t\t\tfor decoded in decode(\n\t\t\t\timage.matrix, [ZBarSymbol.QRCODE]\n\t\t\t)\n\t\t]\n\t)\n", "sub_path": "source/mchqr/detector.py", 
"file_name": "detector.py", "file_ext": "py", "file_size_in_byte": 1009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "pyzbar.pyzbar.Decoded", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 9, "usage_type": "name"}, {"api_name": "mchqr.image.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "mchqr.solution.AlgoPair", "line_number": 9, "usage_type": "name"}, {"api_name": "mchqr.image.Image", "line_number": 11, "usage_type": "name"}, {"api_name": "mchqr.solution.DetectedList", "line_number": 11, "usage_type": "name"}, {"api_name": "mchqr.solution.AlgoPair", "line_number": 12, "usage_type": "call"}, {"api_name": "mchqr.image.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "cv2.QRCodeDetector", "line_number": 22, "usage_type": "call"}, {"api_name": "mchqr.solution.Detected", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 28, "usage_type": "call"}, {"api_name": "mchqr.image.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "mchqr.solution.Detected", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar.decode", "line_number": 46, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar.ZBarSymbol.QRCODE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pyzbar.pyzbar.ZBarSymbol", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "124644850", "text": "# -*- coding: utf-8 -*-\n\nimport os\nfrom config.settings import BASE_DIR, PATHS\nimport qlikview.orcamento as orcamento\nimport qlikview.reducao as reducao\nimport qlikview.contratos as contratos\n\n\npath = os.path.join(BASE_DIR,\n PATHS['dir']['SAÍDA'])\n\ndef run():\n df = orcamento.hierarquia.parse_centro_custo()\n filename = os.path.join(path, 'hierarquia centros de custo.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df_classecusto = orcamento.hierarquia.parse_classe_custo()\n filename = os.path.join(path, 'hierarquia classes de custo.xlsx')\n df_classecusto.to_excel(filename, index=False)\n\n df_novopan = orcamento.novopan.parse()\n filename = os.path.join(path, 'novopan.xlsx')\n print(filename)\n df_novopan.to_excel(filename, index=False)\n\n df_acompanhamento = reducao.realizacao.parse(df_classecusto)\n filename = os.path.join(path, 'acompanhamento redução.xlsx')\n df_acompanhamento.to_excel(filename, index=False)\n\n del df_classecusto\n del df_novopan\n del df_acompanhamento\n\n df = orcamento.compartilhado.parse()\n filename = os.path.join(path, 'compartilhado.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = orcamento.justificativas.parse()\n filename = os.path.join(path, 'justificativas.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = orcamento.pacotes.parse()\n filename = os.path.join(path, 'pacotes.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.contratos.parse()\n filename = os.path.join(path, 'contratos redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.comentarios.parse()\n filename = os.path.join(path, 'comentários redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n df = reducao.frs.parse()\n filename = os.path.join(path, 'frs redução orçamentária.xlsx')\n df.to_excel(filename, index=False)\n del df\n\n # df_contratos = contratos.contratos.parse()\n 
# df_frs = contratos.frscompartilhado.parse_sede()\n # df_contratos = contratos.process.status_prazo(df_contratos)\n # df_contratos = contratos.process.status_valor(df_contratos, df_frs)\n #\n # filename = os.path.join(path, 'contratos vigentes sede.xlsx')\n # df_contratos.to_excel(filename, index=False)\n # del df_contratos\n #\n # filename = os.path.join(path, 'custos contratos sede.xlsx')\n # df_frs.to_excel(filename, index=False)\n # del df_frs\n\nif __name__ == '__main__':\n run()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "config.settings.BASE_DIR", "line_number": 10, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.settings.PATHS", "line_number": 11, "usage_type": "name"}, {"api_name": "qlikview.orcamento.hierarquia.parse_centro_custo", "line_number": 14, "usage_type": "call"}, {"api_name": "qlikview.orcamento.hierarquia", "line_number": 14, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento.hierarquia.parse_classe_custo", "line_number": 19, "usage_type": "call"}, {"api_name": "qlikview.orcamento.hierarquia", "line_number": 19, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento.novopan.parse", "line_number": 23, "usage_type": "call"}, {"api_name": "qlikview.orcamento.novopan", "line_number": 23, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "qlikview.reducao.realizacao.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "qlikview.reducao.realizacao", "line_number": 28, "usage_type": "attribute"}, {"api_name": "qlikview.reducao", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento.compartilhado.parse", "line_number": 36, "usage_type": "call"}, {"api_name": "qlikview.orcamento.compartilhado", "line_number": 36, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento.justificativas.parse", "line_number": 41, "usage_type": "call"}, {"api_name": "qlikview.orcamento.justificativas", "line_number": 41, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento.pacotes.parse", "line_number": 46, "usage_type": 
"call"}, {"api_name": "qlikview.orcamento.pacotes", "line_number": 46, "usage_type": "attribute"}, {"api_name": "qlikview.orcamento", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "qlikview.reducao.contratos.parse", "line_number": 51, "usage_type": "call"}, {"api_name": "qlikview.reducao.contratos", "line_number": 51, "usage_type": "attribute"}, {"api_name": "qlikview.reducao", "line_number": 51, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "qlikview.reducao.comentarios.parse", "line_number": 56, "usage_type": "call"}, {"api_name": "qlikview.reducao.comentarios", "line_number": 56, "usage_type": "attribute"}, {"api_name": "qlikview.reducao", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "qlikview.reducao.frs.parse", "line_number": 61, "usage_type": "call"}, {"api_name": "qlikview.reducao.frs", "line_number": 61, "usage_type": "attribute"}, {"api_name": "qlikview.reducao", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "405010029", "text": "# Copyright (c) 2014, The MITRE Corporation. All rights reserved.\r\n# See LICENSE.txt for complete terms.\r\n\r\nimport unittest\r\n\r\nfrom stix.campaign import Campaign\r\nfrom cybox.test import EntityTestCase\r\n\r\n\r\nclass CampaignTest(EntityTestCase, unittest.TestCase):\r\n klass = Campaign\r\n _full_dict = {\r\n 'id': \"example:Campaign-341\",\r\n 'timestamp': \"2014-01-31T06:14:46\",\r\n 'version': '1.1.1',\r\n 'title': 'Purple Elephant',\r\n 'description': 'A pretty novice set of actors.',\r\n 'short_description': 'novices',\r\n 'names': {\r\n 'names': [\"Dancing Hippos\", \"Crazy Squirrels\"],\r\n },\r\n 'intended_effects': [\r\n {\r\n 'timestamp': \"2014-03-11T06:24:26\",\r\n 'value': \"Doing bad stuff\",\r\n },\r\n {\r\n 'timestamp': \"2014-03-21T06:24:26\",\r\n 'value': \"Doing really bad stuff\",\r\n }\r\n ],\r\n 'status': \"Ongoing\",\r\n 'related_ttps': {\r\n 'scope': \"exclusive\",\r\n 'ttps': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'ttp': {'title': \"Stealth\", 'version': '1.1.1'},\r\n }\r\n ]\r\n },\r\n 'related_incidents': {\r\n 'scope': \"inclusive\",\r\n 'incidents': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'incident': {'idref': \"example:Incident-2\",\r\n 'version': '1.1.1'},\r\n }\r\n ]\r\n },\r\n 'related_indicators': {\r\n 'scope': \"inclusive\",\r\n 'indicators': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'indicator': {'idref': \"example:Indicator-77\",\r\n 'version': '2.1.1'},\r\n }\r\n ]\r\n },\r\n 'attribution': [{\r\n 'scope': \"inclusive\",\r\n 'threat_actors': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'threat_actor': {'title': \"Campaign Actor #1\",\r\n 'version': '1.1.1'},\r\n },\r\n {\r\n 'threat_actor': {'idref': \"example:ThreatActor-111\",\r\n 'version': 
'1.1.1'},\r\n },\r\n ],\r\n }],\r\n 'associated_campaigns': {\r\n 'scope': \"inclusive\",\r\n 'campaigns': [\r\n {\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'information_source': {'description': \"Threat Feed\"},\r\n 'campaign': {'title': \"Baby Elephant\", 'version': '1.1.1'},\r\n }\r\n ],\r\n },\r\n 'confidence': {'value': {'value': \"Medium\", 'xsi:type':'stixVocabs:HighMediumLowVocab-1.0'}},\r\n 'activity': [\r\n {\r\n 'date_time': \"2012-01-01T08:45:31\",\r\n 'description': \"The first bad thing\"\r\n },\r\n {\r\n 'date_time': \"2012-01-02T08:45:31\",\r\n 'description': \"Another bad thing\"\r\n },\r\n ],\r\n 'information_source': {\r\n 'description': \"A former member of the campaign.\",\r\n 'identity': {\r\n 'name': \"Mr. D. Fector\",\r\n },\r\n },\r\n 'handling': [\r\n {\r\n 'marking_structures': [{\r\n 'marking_model_name': 'TLP',\r\n 'color': \"RED\",\r\n 'xsi:type': \"tlpMarking:TLPMarkingStructureType\",\r\n }]\r\n }\r\n ],\r\n 'related_packages': {\r\n 'packages': [\r\n {'idref': \"example:Package-AB\", 'relationship': \"Parent\"},\r\n {'idref': \"example:Package-CD\", 'relationship': \"Child\"}\r\n ]\r\n }\r\n }\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n", "sub_path": "stix-1.1.1.0/stix/test/campaign_test.py", "file_name": "campaign_test.py", "file_ext": "py", "file_size_in_byte": 4342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cybox.test.EntityTestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "stix.campaign.Campaign", "line_number": 11, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "13108336", "text": "import urllib.request as request\nimport json\nimport ssl\nimport pymysql\nssl._create_default_https_context = ssl._create_unverified_context\n\ndb=pymysql.connect(host=\"127.0.0.1\",user=\"root\",password=\"5566\",database=\"TravelWeb\")\n\n# db = mysql.connector.connect(\n# host=\"127.0.0.1\", user=\"root\", password=\"5566\", database=\"TravelWeb\"\n# )\n\ncur = db.cursor()\n\n\nfilename = \"taipei-attractions.json\"\nwith open(filename) as json_file:\n data = json.load(json_file)\n\nlandList = data[\"result\"][\"results\"]\n\n# 景點放入db\nfor n in landList:\n attrId = n[\"_id\"] # 景點編號\n landName = n[\"stitle\"] # 景點名稱\n landMrt = n[\"MRT\"] # 景點捷運\n landType = n[\"CAT2\"] # 景點類別\n\n # 處理全部景點照片網址\n photoUrl = n[\"file\"].split(\"http\") #photoUrl是list\n pic_list=[]\n for i in photoUrl: # 逐一檢查網址是否為圖片檔\n my_suffixes = (\"JPG\", \"PNG\", \"jpg\", \"png\")\n if i.endswith(my_suffixes) != True or i == '' :\n continue\n pic='http'+ i\n pic_list.append(pic) # pic_list為所有可用景點網址的列表\n pic_list = str(pic_list) \n\n landIntro = n[\"xbody\"] # 景點簡介\n landAddr = n[\"address\"] # 景點地址\n landTrans = n[\"info\"] # 景點交通\n landLati = n[\"latitude\"] #景點緯度\n landLongi = n[\"longitude\"] #景點經度\n cur.execute('insert into `attractions`(attrId, title, mrt, type, pic_link, introduction, address, transportation, latitude, longitude) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',(attrId, landName, landMrt, landType, pic_list, landIntro, landAddr, landTrans, landLati, landLongi))\n db.commit()\n # print(pic_list)\n\n# 處理景點照片\n# photoUrl = landList[0][\"file\"].split(\"http\") #photoUrl是list\n# pic_list=[]\n# for i in photoUrl: # 逐一檢查網址是否為圖片檔\n# my_suffixes = (\"JPG\", \"PNG\", 
\"jpg\", \"png\")\n# if i.endswith(my_suffixes) != True or i == '' :\n# continue\n# pic='http'+ i\n# pic_list.append(pic) # pic_list為所有可用景點網址的列表\n# pic_list = str(pic_list) \n", "sub_path": "data/set-attr.py", "file_name": "set-attr.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "ssl._create_default_https_context", "line_number": 5, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "253658980", "text": "import argparse\nimport re\nfrom typing import Iterable, Dict\n\nimport requests\nfrom auth0.v3.management import Auth0\nfrom yaml import load\nfrom .utils import (\n ENDC,\n WARNING,\n auth0_token,\n get_users_from_auth0,\n stormpath_connection,\n)\n\n\ndef prompt_user(prompt: str='Please enter [y/n]: ', ttl=3) -> bool:\n \"\"\" \n Prompt the user for some action\n \n :param prompt: Prompt to display to the user\n :param ttl: Tries to live (an exception is thrown)\n \"\"\"\n if type(ttl) == int:\n if ttl <= 0:\n raise Exception('invalid choice entered.')\n ttl -= 1\n response = input(prompt)\n if response.lower() in {'no', 'n'}:\n return False\n elif response.lower() in {'yes', 'y'}:\n return True\n\n return prompt_user('Please enter [y/n]: ', ttl)\n\n\ndef add_to_auth0_group(auth0_users: Iterable[Dict], auth0_authz_jwt: str,\n group: Dict, webtask_url: str, interactive=True,\n dry_run: bool = False):\n \"\"\"\n Issue mass password resets of Auth0 user passwords\n\n :param auth0_users: Iterable of Auth0 user dicts\n :param auth0_authz_jwt: JWT with Auth0 Authorization audience\n :param interactive: Prompt before making any changes \n :param dry_run: Do not actually delete resources\n :param limit: Limit changes to these users\n :param skip: Skip these users\n :return: \n \"\"\"\n group_url_template = f\"{webtask_url}/users/%s/groups\"\n headers = {'Authorization': f'Bearer {auth0_authz_jwt}'}\n template = 'Password email to user \"{}\" {}'\n for user in auth0_users:\n email = user['email']\n user_id = user['user_id']\n\n if interactive:\n if not prompt_user(f'Add {email} to group {group[\"name\"]}? 
[y/n]: '):\n continue\n if dry_run:\n print(template.format(email,\n WARNING + 'would have been added to {group_name}' + ENDC))\n else:\n r = requests.patch(\n group_url_template % user_id,\n json=[group[\"id\"]],\n headers=headers\n )\n assert r.status_code == 204, 'Bad status'\n print(f'Added {email} to group \"{group[\"name\"]}\"')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Sync accounts from a Stormpath app to an Auth0 domain')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('group', help='group to add users to')\n parser.add_argument('--regex', help='regex to use in filtering accounts by email')\n parser.add_argument('--dry-run', action='store_true',\n help='print results to screen but do not create/change resources')\n\n parser.add_argument('--yes', action='store_true',\n help='do not prompt when sending resets or deleting users')\n parser.add_argument('--no-prompt', action='store_true',\n help='do not prompt for each user when deleting or sending reset emails')\n\n args = parser.parse_args()\n\n email_regex = None\n if args.regex:\n email_regex = re.compile(args.regex)\n print(f'Filtering users by email with regex: {email_regex}')\n\n with open(args.config) as config_fp:\n config = load(config_fp)\n\n a0_cfg = config['auth0']\n auth0 = Auth0(config['auth0']['domain'], auth0_token(config['auth0']))\n authorization_token = auth0_token(config['auth0'], audience='urn:auth0-authz-api')\n\n token = auth0_token(a0_cfg, )\n auth0_users = get_users_from_auth0(auth0, regex=email_regex)\n add_to_auth0_group(auth0_users, auth0, args.group, authorization_token, a0_cfg['extensions']['authorization']['webtaskUrl'],\n dry_run=args.dry_run, interactive=not args.no_prompt)\n", "sub_path": "groups.py", "file_name": "groups.py", "file_ext": "py", "file_size_in_byte": 3799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.Iterable", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.WARNING", "line_number": 63, "usage_type": "name"}, {"api_name": "utils.ENDC", "line_number": 63, "usage_type": "name"}, {"api_name": "requests.patch", "line_number": 65, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 74, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 91, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 95, "usage_type": "call"}, {"api_name": "auth0.v3.management", "line_number": 98, "usage_type": "name"}, {"api_name": "auth0.v3.management.Auth0", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.auth0_token", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.auth0_token", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.auth0_token", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.get_users_from_auth0", "line_number": 102, "usage_type": "call"}, {"api_name": "auth0.v3.management", "line_number": 102, "usage_type": "argument"}, {"api_name": "auth0.v3.management", "line_number": 103, "usage_type": "argument"}]} +{"seq_id": "35655151", "text": "from flask import g\nfrom datetime import datetime\nfrom sqlalchemy import Column, Integer, DateTime, ForeignKey, String\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declared_attr\nfrom transitions import Machine\nfrom 
flask_security import current_user\n\n\nclass AuditMixin(object):\n created_at = Column(DateTime, default=datetime.now)\n updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)\n\n @declared_attr\n def created_by_id(cls):\n return Column(Integer,\n ForeignKey('user.id',\n name='fk_%s_created_by_id'\n % cls.__name__, use_alter=True),\n default=_current_user_id_or_none\n )\n\n @declared_attr\n def created_by(cls):\n return relationship(\n 'User',\n primaryjoin='User.id == %s.created_by_id' % cls.__name__,\n remote_side='User.id'\n )\n\n @declared_attr\n def updated_by_id(cls):\n return Column(Integer,\n ForeignKey('user.id',\n name='fk_%s_updated_by_id' % cls.__name__,\n use_alter=True),\n default=_current_user_id_or_none,\n onupdate=_current_user_id_or_none)\n\n @declared_attr\n def updated_by(cls):\n return relationship(\n 'User',\n primaryjoin='User.id == %s.updated_by_id' % cls.__name__,\n remote_side='User.id'\n )\n\n\ndef _current_user_id_or_none():\n create_user_id = None\n try:\n return g.user.id\n except Exception as e:\n pass\n try:\n return current_user.id\n except Exception as e:\n pass\n return None\n\n\nclass DealStateMixin(object):\n @declared_attr\n def __tablename__(cls):\n return cls.__name__.lower()\n\n @declared_attr\n def status(cls):\n return Column(String())\n\n @property\n def state(self):\n return self.status\n\n @state.setter\n def state(self, value):\n if self.status != value:\n self.status = value\n\n def after_state_change(self):\n self._session.add(self)\n self._session.commit()\n\n @classmethod\n def init_state_machine(cls, obj, *args, **kwargs):\n states = ['prospect', 'lead', 'contracted', 'closed', 'dead']\n transitions = [\n ['activate', 'prospect', 'lead'],\n ['contract', 'lead', 'contracted']\n ['close', 'contracted', 'closed']\n ]\n\n initial = obj.status or states[0]\n\n machine = Machine(model=obj, states=states, transitions=transitions,\n initial=initial,\n after_state_change='after_state_change')\n\n # in case that we need to have machine obj in model obj\n setattr(obj, 'machine', machine)\n", "sub_path": "app/mixins.py", "file_name": "mixins.py", "file_ext": "py", "file_size_in_byte": 2902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 11, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 12, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 16, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 14, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": 
"sqlalchemy.Integer", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 31, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 52, "usage_type": "name"}, {"api_name": "flask_security.current_user.id", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask_security.current_user", "line_number": 56, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declared_attr", "line_number": 67, "usage_type": "name"}, {"api_name": "transitions.Machine", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "429464282", "text": "import discord\nfrom discord.ext import commands\nfrom aiohttp import ClientSession\nimport random\nfrom bot.paginators import EmbedPaginator\nfrom bot.reddit import Reddit \n\nclass Anime(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n async def anilist_request(self, query: str, variables):\n url = 'https://graphql.anilist.co'\n async with ClientSession() as cs:\n async with cs.post(url, json={'query': query, 'variables': variables}) as r:\n data = await r.json()\n\n return r.status, data\n\n def create_anime_embed(self, anime):\n title = anime['title']['romaji']\n score = anime['meanScore']\n description = anime['description']\n genres = \", \".join(anime['genres'])\n image = anime['coverImage']['large']\n url = anime['siteUrl']\n\n embed_title = f'{title} ({score}/100)'\n embed_description = f'**Genres**: {genres}\\n \\n {description}'\n\n anime_embed = discord.Embed(title=embed_title, description=embed_description, url=url)\n anime_embed.set_image(url=image)\n\n return anime_embed\n\n @commands.group()\n async def anime(self, ctx: commands.Context):\n '''The main commmand for anime!'''\n if ctx.invoked_subcommand is None:\n await ctx.send('**Usage:** \\n `,anime recommend/rec/rmd`\\n `,anime search/s `\\n `,anime meme`')\n\n @anime.command(aliases=['rec', 'rmd'])\n async def recommend(self, ctx: commands.Context):\n '''Recommends a random high rated anime.'''\n \n # First Request:\n query = '''\n query ($page: Int, $perPage: Int) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, averageScore_greater: 75) {\n id\n }\n }\n }\n '''\n\n variables = {\n 'page': 1,\n 'perPage': 5\n } \n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n pages_amount = data['data']['Page']['pageInfo']['lastPage']\n\n # Second Request:\n\n query = '''\n query ($page: Int, $perPage: Int) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n total\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, averageScore_greater: 75) {\n title {\n romaji\n }\n description(asHtml: false)\n meanScore\n genres\n coverImage {\n large\n medium\n }\n siteUrl\n }\n }\n }\n '''\n\n variables = {\n 'page': random.randint(1, pages_amount),\n 
'perPage': 5\n }\n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n page = data['data']['Page']['media']\n anime = page[random.randint(0, len(page) - 1)]\n\n anime_embed = self.create_anime_embed(anime)\n\n await ctx.send(embed=anime_embed)\n \n @anime.command(aliases=['s'])\n async def search(self, ctx: commands.Context, name: str = ''):\n '''Searches for an anime by name'''\n query = '''\n query ($page: Int, $perPage: Int, $search: String) {\n Page (page: $page, perPage: $perPage) {\n pageInfo {\n total\n currentPage\n lastPage\n perPage\n }\n media (type: ANIME, search: $search) {\n title {\n romaji\n }\n meanScore\n description(asHtml: false)\n genres\n coverImage {\n large\n medium\n }\n siteUrl\n }\n }\n }\n '''\n\n variables = {\n 'page': 1,\n 'perPage': 5,\n 'search': name\n }\n\n status_code, data = await self.anilist_request(query, variables)\n\n if status_code != 200:\n await ctx.send(data['errors'][0]['message'])\n \n page = data['data']['Page']['media']\n total = data['data']['Page']['pageInfo']['total']\n pages_amount = data['data']['Page']['pageInfo']['lastPage']\n anime_amount = int(total / pages_amount)\n\n\n #loop through media and get the anime list and paginate them\n embed_list = []\n for i in range(anime_amount):\n anime = page[i]\n anime_embed = self.create_anime_embed(anime)\n embed_list.append(anime_embed)\n \n if anime_amount == 0:\n await ctx.send(f'{name} Not Found!')\n elif anime_amount == 1:\n await ctx.send(embed=embed_list[0])\n else:\n paginator = EmbedPaginator(embeds=embed_list)\n await paginator.run(ctx) \n \n @anime.command()\n async def meme(self, ctx: commands.Context):\n '''Get a random anime meme'''\n reddit = Reddit()\n submission = reddit.get_random_submission('Animemes')\n embed = discord.Embed(title=submission.title)\n embed.set_image(url=submission.url)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def jojo(self, ctx: commands.Context):\n '''Get a random jojo post'''\n subs_list = ['ShitPostCrusaders', 'StardustCrusaders', 'wholesomejojo']\n subreddit_name = random.choice(subs_list)\n\n reddit = Reddit()\n submission = reddit.get_random_submission(subreddit_name)\n embed = discord.Embed(title=submission.title)\n embed.set_image(url=submission.url)\n await ctx.send(embed=embed)\n \n\ndef setup(bot):\n bot.add_cog(Anime(bot))\n", "sub_path": "cogs/anime.py", "file_name": "anime.py", "file_ext": "py", "file_size_in_byte": 6308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 8, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 8, "usage_type": "name"}, {"api_name": "bot.paginators", "line_number": 10, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 37, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.group", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 36, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 43, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": 
"random.randint", "line_number": 103, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 113, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 120, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 120, "usage_type": "name"}, {"api_name": "bot.paginators.EmbedPaginator", "line_number": 177, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 181, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 181, "usage_type": "name"}, {"api_name": "bot.reddit.Reddit", "line_number": 183, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 185, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 190, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 190, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 193, "usage_type": "call"}, {"api_name": "bot.reddit.Reddit", "line_number": 195, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 197, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 189, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 189, "usage_type": "name"}, {"api_name": "bot.paginators.add_cog", "line_number": 203, "usage_type": "call"}, {"api_name": "bot.paginators", "line_number": 203, "usage_type": "name"}]} +{"seq_id": "474539906", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 4 15:16:19 2019\r\n\r\n@author: mjrubino\r\n\"\"\"\r\n\r\n# Import modules\r\nimport pandas as pd\r\nfrom pygbif import occurrences as occ\r\nfrom pygbif import species\r\n\r\nimport sys\r\nsys.path.append('C:/Data/USGS Analyses/GAP-Habitat-Map-Assessment/')\r\nimport config\r\nfrom datetime import datetime\r\n\r\nworkDir = 'C:/Data/USGS Analyses/GAP-Habitat-Map-Assessment/'\r\n\r\nbeginTime = datetime.now()\r\nprint(\"*\"*40); print('Began ' + str(beginTime)); print(\"*\"*40)\r\n\r\nsppList=['Accipiter cooperii','Myodes gapperi']\r\n\r\n# Make an empty list to append each species' records\r\nreclst = []\r\n# Make column names for the list\r\nlstcols = ['SppName','nRecords']\r\n\r\n\r\nn = 0\r\n# Loop over each species in the full species list in the config file\r\nfor spp in config.sciNames1590:\r\n \r\n print('Working on the following species:', spp)\r\n recs = occ.search(scientificName = spp,\r\n hasCoordinate=True,\r\n country='US', \r\n geoSpatialIssue=False)\r\n # Not all species have COUNT in their occurrence record dictionary\r\n # !!!!!!!!! 
WHAT THE FUCK GBIF !!!!!!!!!!!!!!!\r\n # Make sure it does otherwise make it 0.9\r\n if 'count' in recs:\r\n cnt = recs['count']\r\n n = n + cnt\r\n print(' it has', cnt, 'records')\r\n else:\r\n print(' it has UNKNOWN NUMBER of records',)\r\n cnt = 0.9\r\n # Append to the record list\r\n reclst.append([spp,cnt])\r\n\r\n\r\nprint('\\n TOTAL NUMBER OF RECORDS FOR THIS SPECIES LIST =',n)\r\n\r\n# Make a dataframe out of the compiled lists and save as CSV\r\ndfRecordCount = pd.DataFrame(data=reclst, columns=lstcols)\r\ndfRecordCount.to_csv(workDir + \"SpeciesOccurrenceCounts-GBIF.csv\")\r\n\r\ndel reclst,lstcols,spp,recs,cnt,dfRecordCount\r\n\r\nendTime = datetime.now(); procDelta = endTime - beginTime\r\nprint(\"\\n\\nProcessing time = \" + str(procDelta) + \"\\n\")\r\nprint('*'*35,'DONE','*'*35)", "sub_path": "Scripts/GBIFRecordCount.py", "file_name": "GBIFRecordCount.py", "file_ext": "py", "file_size_in_byte": 1914, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "config.sciNames1590", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygbif.occurrences.search", "line_number": 36, "usage_type": "call"}, {"api_name": "pygbif.occurrences", "line_number": 36, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "86265927", "text": "__author__ = 'ict'\n\nimport json\nimport os\n\n\nprofile_dir = \"d:/steam_profile\"\ngraph_file = \"d:/steam_friend.txt\"\ngame_mark_file = \"d:/steam_game.txt\"\nmapping_file = \"d:/steam_mapping.csv\"\nnumber_base = 1\nclose_loop = True\nmax_sum = 500000\n\nif __name__ == \"__main__\":\n steam_id64_mapping = {}\n steam_game_mapping = {}\n id64_game_mapping = {}\n game_player = {}\n graph = set()\n forbidden_node = set()\n count = number_base\n game_count = 1\n if profile_dir[-1] != os.sep:\n profile_dir += os.sep\n lock_id = None\n if close_loop:\n lock_id = set([filename[:-5] for filename in os.listdir(profile_dir)])\n for filename in os.listdir(profile_dir):\n if filename[-5:] == \".json\":\n print(\"Parsing: %s\" % filename)\n with open(profile_dir + filename, \"r\") as fp:\n pr = json.load(fp)\n sid = pr[\"steamID64\"]\n if sid not in steam_id64_mapping:\n steam_id64_mapping[sid] = count\n count += 1\n if count - number_base >= max_sum:\n break\n if \"friend\" in pr:\n most_time = 0\n most_game = \"0\"\n if \"mostgame\" in pr and len(pr[\"mostgame\"]) != 0:\n for game_id, total_time, _ in pr[\"mostgame\"]:\n if total_time > most_time and game_id != \"10\" and game_id != \"240\":\n most_time = total_time\n most_game = game_id\n if \"game\" in pr and most_game == \"0\":\n for game_id, total_time, _ in pr[\"game\"]:\n if not isinstance(total_time, float):\n total_time = float(str(total_time).replace(\",\", \"\"))\n if total_time > most_time and game_id != \"10\" and game_id != \"240\":\n most_time = total_time\n most_game = game_id\n if most_game == \"10\" or most_game == \"240\":\n most_game = \"730\"\n if most_game not in steam_game_mapping:\n 
steam_game_mapping[most_game] = game_count\n game_count += 1\n id64_game_mapping[steam_id64_mapping[sid]] = steam_game_mapping[most_game]\n if steam_game_mapping[most_game] not in game_player:\n game_player[steam_game_mapping[most_game]] = [steam_id64_mapping[sid]]\n if steam_game_mapping[most_game] not in game_player:\n game_player[steam_game_mapping[most_game]] = []\n game_player[steam_game_mapping[most_game]].append(steam_id64_mapping[sid])\n for friend in pr[\"friend\"]:\n if lock_id is not None:\n if friend not in lock_id:\n continue\n if friend not in steam_id64_mapping:\n steam_id64_mapping[friend] = count\n count += 1\n a = steam_id64_mapping[sid]\n b = steam_id64_mapping[friend]\n if a < b:\n graph.add((a, b))\n else:\n graph.add((b, a))\n else:\n forbidden_node.add(steam_id64_mapping[sid])\n new_graph = set()\n mark_mapping = {}\n mark_count = 1\n for mark, nodes in game_player.items():\n if len(nodes) / len(steam_id64_mapping) < 0.01:\n for node in nodes:\n forbidden_node.add(node)\n else:\n mark_mapping[mark] = mark_count\n mark_count += 1\n node_mapping = {}\n new_id64_game_mapping = {}\n count = number_base\n for edge in graph:\n if edge[0] not in forbidden_node and edge[1] not in forbidden_node:\n if edge[0] not in node_mapping:\n node_mapping[edge[0]] = count\n new_id64_game_mapping[count] = id64_game_mapping[edge[0]]\n count += 1\n if edge[1] not in node_mapping:\n node_mapping[edge[1]] = count\n new_id64_game_mapping[count] = id64_game_mapping[edge[1]]\n count += 1\n new_graph.add(edge)\n with open(graph_file, \"w\") as fp:\n fp.write(\"%d\\n\" % len(node_mapping))\n for edge in new_graph:\n fp.write(\"%d\\t%d\\n\" % (node_mapping[edge[0]], node_mapping[edge[1]]))\n fp.write(\"%d\\t%d\\n\" % (node_mapping[edge[1]], node_mapping[edge[0]]))\n with open(mapping_file, \"w\") as fp:\n for steam_id64, number in steam_id64_mapping.items():\n fp.write(\"%s, %d\\n\" % (steam_id64, number))\n with open(game_mark_file, \"w\") as fp:\n id64_game_list = list(new_id64_game_mapping.items())\n id64_game_list.sort()\n print(steam_game_mapping)\n fp.write(\" \".join([str(mark_mapping[mark]) for _, mark in id64_game_list]))", "sub_path": "tool/friends_graph.py", "file_name": "friends_graph.py", "file_ext": "py", "file_size_in_byte": 4928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.sep", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 29, "usage_type": "call"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "493329645", "text": "import unittest\nimport pymongo\nimport configparser\nfrom src.Resources.Search import search\n\n\nclass TestSearch(unittest.TestCase):\n\n def setUp(self):\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_host = config.get('mongodb', 'host')\n self.client = pymongo.MongoClient(host=db_host)\n self.db = self.client.get_database('guardian')\n\n self.db.test.insert_many([\n {\n \"url\": \"sdfsdf\",\n \"title\": \"Alice likes Bob\",\n \"content\": \"Alice likes Bob\",\n \"author\": \"Alice\"\n },\n {\n \"url\": \"sdfsald;fjk\",\n \"title\": \"Bob loves Alice\",\n \"content\": \"Bob loves Alice\",\n \"author\": \"Bob\"\n },\n {\n \"url\": \"lsadkfjal\",\n \"title\": \"Charlie hates Bob\",\n \"content\": \"Charlie hates Bob\",\n \"author\": \"Charlie\"\n 
}\n ])\n self.db.test.create_index([\n (\"title\", pymongo.TEXT),\n (\"content\", pymongo.TEXT)\n ])\n\n def test_none(self):\n # Should not get any result from the following keywords\n result = search(\"adsfasdf\", self.db.test)\n self.assertIsNone(result)\n result = search([\"Alice\", \"Bob\", \"Charlie\"], self.db.test)\n self.assertIsNone(result)\n\n def test_hit(self):\n # Should get exact result from the following keywords\n result = search(\"Alice\", self.db.test)\n self.assertIsInstance(result, dict)\n self.assertEqual(\n len(result), 2, msg=\"Search for\\\"Alice\\\" should get 2 results.\")\n result = search(\"Bob\", self.db.test)\n self.assertIsInstance(result, dict)\n self.assertEqual(\n len(result), 3, msg=\"Search for\\\"Bob\\\" should get 3 results.\")\n result = search(\"Charlie\", self.db.test)\n self.assertIsInstance(result, dict)\n self.assertEqual(\n len(result), 1, msg=\"Search for\\\"Charlie\\\" should get 1 results.\")\n\n def test_logic(self):\n # Should perform logical conjunction between keywords\n result = search([\"Alice\", \"Bob\"], self.db.test)\n self.assertIsInstance(result, dict)\n self.assertEqual(\n len(result), 2, msg=\"Search for lovers should get 2 results.\")\n result = search([\"Alice\", \"Charlie\"], self.db.test)\n self.assertIsNone(result)\n\n def tearDown(self):\n self.db.test.delete_many({})\n self.client.close()\n", "sub_path": "tests/test_search.py", "file_name": "test_search.py", "file_ext": "py", "file_size_in_byte": 2546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 10, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 13, "usage_type": "call"}, {"api_name": "pymongo.TEXT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pymongo.TEXT", "line_number": 38, "usage_type": "attribute"}, {"api_name": "src.Resources.Search.search", "line_number": 43, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 45, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 50, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 54, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 58, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 65, "usage_type": "call"}, {"api_name": "src.Resources.Search.search", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "70648523", "text": "import datetime\nimport sqlite3\n\nfrom load_data import DATABASE, QUESTION_KEYS\nfrom flask import g\n\n\ndef close_connection():\n db = getattr(g, \"_database\", None)\n if db is not None:\n db.close()\n\n\ndef get_db():\n db = getattr(g, \"_database\", None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE, isolation_level=None)\n return db\n\n\ndef update_quiz(form, id):\n cur = get_db().cursor()\n sql = \"UPDATE encuestas SET candidato_elegido=? 
WHERE id=?;\"\n\n candidato = int(form[\"candidato\"])\n cur.execute(sql, (candidato, id))\n\n\ndef save_response(form):\n fecha = datetime.datetime.now().isoformat()\n cur = get_db().cursor()\n sql = \"INSERT INTO encuestas ('fecha','version') VALUES(?,?);\"\n res = cur.execute(sql, (fecha, 2))\n id_encuesta = int(res.lastrowid)\n\n for id_pregunta in QUESTION_KEYS:\n respuesta = int(form[id_pregunta])\n\n sql = (\n \"INSERT INTO respuestas_encuestas\"\n \"('id_encuesta','id_pregunta','respuesta') VALUES(?,?,?);\"\n )\n cur.execute(sql, (id_encuesta, id_pregunta.split(\"_\")[-1], respuesta))\n\n return id_encuesta\n\n\ndef count_rows():\n cur = get_db().cursor()\n sql = \"SELECT count(1) FROM encuestas;\"\n rows = cur.execute(sql).fetchall()\n return str(rows[0][0])\n", "sub_path": "predictor_pol/db_manager.py", "file_name": "db_manager.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.g", "line_number": 9, "usage_type": "argument"}, {"api_name": "flask.g", "line_number": 15, "usage_type": "argument"}, {"api_name": "flask.g._database", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 17, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "load_data.DATABASE", "line_number": 17, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "load_data.QUESTION_KEYS", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "104868624", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 05 13:38:26 2015\n\n@author: zhangbohun\n\"\"\"\n\nimport json\nimport xlwt\n\ndef txt_to_xls(txt_file):\n with open(txt_file,'r') as f:\n json_content = json.load(f)\n workbook = xlwt.Workbook(encoding = 'utf-8')\n sheet = workbook.add_sheet('city', cell_overwrite_ok=True)\n \n for i in range(len(json_content)):\n # 写入对应行列\n row = i\n col = 0\n \n #顺序不对\n #sheet.write(row, col, json_content.keys()[i])\n #json_data = json_content[json_content.keys()[i]]\n \n #不过这里碰巧可以用数字做key\n sheet.write(row, col, i+1)\n json_data = json_content[str(i+1)]\n sheet.write(row,col+1,json_data)\n workbook.save('city.xls')\n\t\t\nif __name__ == '__main__':\n\ttxt_to_xls('city.txt')", "sub_path": "0015/0015.py", "file_name": "0015.py", "file_ext": "py", "file_size_in_byte": 832, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "141220571", "text": "import pygame\nimport random\nimport math\n\npygame.init()\n\nscreen = pygame.display.set_mode((852,480))\npygame.display.set_caption(\"catch eggs\")\nicon = pygame.image.load('basket.png')\npygame.display.set_icon(icon)\n\nbackground = pygame.image.load('backimg.jpg')\n\nhen = pygame.image.load('hen.png')\nhenx = random.randint(0,788)\nheny = 0\nhenx_change = 0.3\nheny_change = 0\n\nbasket = pygame.image.load('bbsket.png')\nbasketx = 20\nbaskety = 400\nbasketx_change = 0\nbaskety_change = 0\n\negg = pygame.image.load('egg.png')\neggx = henx\neggy = heny\neggx_change = 0\neggy_change = 0.3\n\nscore = 0\nmissed = 0\nfont = pygame.font.Font('freesansbold.ttf',32)\n\n\ndef displayScore(show,x,y):\n scrnscore = 
font.render (\"Egg Score : \"+str(score), show, (255,255,255))\n screen.blit(scrnscore, (x, y))\n\ndef displayMissedScore(show,x,y):\n missedScrnScore = font.render (\"Missed : \"+str(missed), show, (255,255,255))\n screen.blit(missedScrnScore, (x,y))\n\ndef isCollided(ex, ey, bx, by):\n distance = math.sqrt((math.pow(bx - ex, 2)) + (math.pow(by - ey, 2)))\n if distance < 40:\n return True\n else:\n return False\n\ndef putegg(x,y):\n screen.blit(egg, (x, y))\n\n\ngame_over = False\nwhile not game_over:\n screen.blit(background, (0, 0))\n screen.blit(hen, (henx, heny))\n screen.blit(basket, (basketx, baskety))\n putegg(eggx,eggy)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n basketx_change = -0.6\n if event.key == pygame.K_RIGHT:\n basketx_change = +0.6\n\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n basketx_change = 0\n\n basketx += basketx_change\n if basketx <= 0:\n basketx = 0\n elif basketx >= 788:\n basketx = 788\n\n henx += henx_change\n if henx <= 0:\n henx_change = 0.3\n heny += heny_change\n if heny >= 416:\n heny=50\n elif henx >= 788:\n henx_change = -0.3\n heny += heny_change\n if heny >= 416 :\n heny = 50\n\n if eggy >= 0:\n eggy += eggy_change\n if eggy >= 450:\n eggy,eggx = 0,henx\n\n eggy += eggy_change\n\n collision = isCollided(eggx, eggy, basketx, baskety)\n if collision:\n eggy , eggx = 0 , henx\n score +=1\n henx = random.randint(0,788)\n heny = 0\n elif collision == False and eggy>=449:\n missed+=0.5\n\n if missed >= 5.1:\n screen.fill((255,240,200))\n msg = pygame.font.SysFont('comicsansms', 60).render(\"GAME OVER\", True, (112, 114, 255))\n screen.blit(msg, [270, 150])\n\n\n displayScore(True,10,10)\n displayMissedScore(True,10,50)\n\n pygame.display.update()\n\npygame.quit()\nquit()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pygame.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 34, "usage_type": 
"call"}, {"api_name": "pygame.font", "line_number": 34, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 73, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "253530017", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@author: zhaogao\n@license: (C) Copyright 2013-2018.\n@contact: 449628536@qq.com\n@software: web-uiautomation-selenium\n@file: test_order.py\n@time: 2019/1/5 下午5:34\n'''\n\nimport pytest\nfrom pages import main_page\n\n\n# pytest get the registered attr driver\n@pytest.mark.usefixtures('driver')\nclass TestDemo(object):\n\n def test_01_order(self, order_price_fixture):\n main = main_page.MainPage(self.driver)\n main.wait_page_present()\n main.set_text_nav_search_text_box('软件测试')\n search_results = main.click_nav_search_text_btn()\n search_results.wait_page_persent()\n order_confirm = search_results.click_product_book_st_second_ver_in_results().click_add_to_cart_btn()\n assert order_confirm.get_order_confirm_msg() == '商品已加入购物车'\n assert order_confirm.get_order_confirm_price() == order_price_fixture\n", "sub_path": "tests/test_order.py", "file_name": "test_order.py", "file_ext": "py", "file_size_in_byte": 923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pages.main_page.MainPage", "line_number": 21, "usage_type": "call"}, {"api_name": "pages.main_page", "line_number": 21, "usage_type": "name"}, {"api_name": "pytest.mark.usefixtures", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "111619406", "text": "import os\nimport json\nimport subprocess\nimport pandas\nfrom datetime import datetime\nimport numpy as np\n\nos.chdir(\"/home/user/Documents/Venitha/COVID_19_Meta/General/COVID-biorxiv\")\n\ncollection={}\n\ndef execute_commandRealtime(cmd):\n \"\"\"Execute shell command and print stdout in realtime.\n Function taken from pyrpipe Singh et.al. 
2020\n usage:\n for output in execute_commandRealtime(['curl','-o',outfile,link]):\n print (output)/home/dell/Documents/Venitha/COVID_19_Meta/General/COVID-biorxiv\n \"\"\"\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)\n\ndef read_collection():\n '''\n open file\n '''\n val=0\n filename='collection.json'\n with open(filename, \"r\") as f:\n data = json.load(f)\n #data is a list of dictionaries\n #print(type(data))\n return data\n\ndef get_terms():\n\tprint('Available terms: \\n')\n\tfor number, entry in enumerate(collection):\n\t\tx=[]\n\t\tfor keys, values in entry.items():\n\t\t\tx.append(keys)\n\t\treturn(np.unique(np.array(x)))\n \t\ndef search(term):\n\t#search in collection is a list of dicts\n\tprint('\\nSearching for keyword',term)\n\tresult=[]\n\tfor d in collection:\n\t\t#search in all keys\n\t\tif (term.lower() in d['rel_title'].lower()) or (term.lower() in d['rel_abs'].lower()):\n\t\t\tresult.append(d)\n\t\t\t#return(np.unique(np.array(result)))\n\treturn(result)\t\n\t\ndef searchall(keywords):\n\tresult=[]\n\tfor k in keywords:\n\t\tresult.extend(search(k))\n\treturn result\t\n\t\ndef removedupes(result):\n\tseen=[]\n\tnew_l=[]\n\tfor d in result:\n\t\tt = tuple(d.items())\n\t\tif t not in seen:\n\t\t\tseen.append(t)\n\t\t\tnew_l.append(d)\n\tprint(\"\\nNumber of matches for keywords \",tosearch,\"after removing duplicates is :\",len(new_l))\n\treturn(new_l)\n\t\ndef get_title(res):\n titles=[]\n for d in res:\n if not d['rel_title'] in titles:\n titles.append(d['rel_title'])\n #print(d['rel_title'])\n return titles\n\ndef get_date(res):\n dates=[]\n for d in res:\n if not d['rel_date'] in dates:\n dates.append(d['rel_date'])\n return dates\n\ndef get_doi(res):\n dois=[]\n for d in res:\n if not d['rel_doi'] in dois:\n dois.append(d['rel_doi'])\n return dois\n \ndef get_info(res):\n\ttitles=[]\n\tdates=[]\n\tdois=[]\n\tfor d in res:\n\t\tif not d['rel_title'] in titles:\n\t\t\ttitles.append(d['rel_title'])\n\t\t\tdates.append(d['rel_date'])\n\t\t\tdois.append(d['rel_doi'])\n\tfilename=datetime.today().strftime('%Y-%m-%d')\n\twith open(\"date_\" + filename + \".txt\", 'w') as f:\n\t\tfor item in dates:\n\t\t\tf.write(\"%s\\n\" % item)\n\n\twith open(\"doi_\" + filename + \".txt\", 'w') as f:\n\t\tfor item in dois:\n\t\t\tf.write(\"%s\\n\" % item)\n\treturn titles\n\t\t\n\t\t\n\ndef filter_date(res,startdate):\n '''\n keep results by date\n '''\n print('\\nFiltering results before',startdate)\n filtered=[]\n for d in res:\n if datetime.strptime(d['rel_date'], '%Y-%m-%d')<=startdate:\n filtered.append(d)\n return filtered\n\n\n#read collection in memory\ncollection=read_collection()\n\nprint(\"JSON API Collection is of type : \",type(collection), \"where it is a list of dictionaries \\n\")\n\n#see available terms\nprint(get_terms())\n\n#perform search\n\n#single keyword search\n#res=search('RNA-seq')\n\n#multiple keyword search\n#tosearch=['proteomics','proteome','mass spectrometry']\n#tosearch=['transcriptome','RNA-Seq','nasal','oropharyngeal','swab']\n#res=searchall(tosearch)\n\n\n\n#CRISPR\n#tosearch=['CRISPR','genome-wide screen']\n#res=[]\n#for d in collection:\n#\tif (tosearch[0].lower() in d['rel_abs'].lower() or tosearch[0].lower() in d['rel_title'].lower()) or (tosearch[1].lower() in d['rel_abs'].lower() or tosearch[1].lower() in 
d['rel_title'].lower()):\n#\t\tres.append(d)\t\n\n#Interactome\ntosearch=['Interactome','Protein-Protein Interaction','Protein-Protein Interactions','global proteome','Multi-omics','Multi-omic']\n#res=searchall(tosearch)\nres=[]\nfor d in collection:\n\tif tosearch[0].lower() in d['rel_abs'].lower() or tosearch[0].lower() in d['rel_title'].lower(): \n\t\tres.append(d)\n\telif (tosearch[1].lower() in d['rel_abs'].lower() or tosearch[1].lower() in d['rel_title'].lower()) or (tosearch[2].lower() in d['rel_abs'].lower() or tosearch[2].lower() in d['rel_title'].lower()):\n\t\tres.append(d)\n\telif tosearch[3].lower() in d['rel_abs'].lower() or tosearch[3].lower() in d['rel_title'].lower():\n\t\tres.append(d)\n\telif (tosearch[4].lower() in d['rel_abs'].lower() or tosearch[4].lower() in d['rel_title'].lower()) or (tosearch[5].lower() in d['rel_abs'].lower() or tosearch[5].lower() in d['rel_title'].lower()):\n\t\tres.append(d)\n\nprint(\"\\nNumber of matches for keywords \",tosearch,\"is :\",len(res))\n\n#Remove duplicate records\nfilt_res=removedupes(res)\n\n#Filtering by date\n#fdate=datetime.strptime('2020-09-15', '%Y-%m-%d')\nfdate=datetime.strptime('2020-10-12', '%Y-%m-%d')\n\nfinal_res=get_info(filter_date(filt_res,fdate))\n\nprint(\"\\nNumber of records matching \",tosearch,\"filtered before \",fdate,\"is \",len(final_res),\"\\n\")\n\nfilename=datetime.today().strftime('%Y-%m-%d')\n\nprint(\"****************************************************************************************************************************\")\nprint(\"\\nWriting results to file \",filename + \".txt\",\"\\n\")\n \nwith open(\"title_\" + filename + \".txt\", 'w') as f:\n for item in final_res:\n f.write(\"%s\\n\" % item)\n\ncommand=['sed','\"s/^/https:\\/\\/doi.org\\//\"',\"doi_\" + filename + \".txt\",\">\",\"doi_\" + filename+ \"_edited\" + \".txt\"]\ncommand= \" \".join(command)\n\nos.system(command)\n\ncommand=['paste',\"title_\" + filename + \".txt\",\"date_\" + filename + \".txt\",\"doi_\" + filename + \"_edited\" + \".txt\",\">\",filename + \".csv\"]\ncommand= \" \".join(command)\n\nos.system(command)\n", "sub_path": "query.py", "file_name": "query.py", "file_ext": "py", "file_size_in_byte": 5851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.chdir", "line_number": 8, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 25, "usage_type": "call"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 184, "usage_type": "name"}, {"api_name": "os.system", "line_number": 196, "usage_type": 
"call"}, {"api_name": "os.system", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "106069748", "text": "from __future__ import annotations\n\nimport pytest\nfrom snapflow.core.graph import Graph, graph_from_yaml\nfrom snapflow.modules import core\nfrom tests.utils import (\n function_chain_t1_to_t2,\n function_generic,\n function_multiple_input,\n function_self,\n function_t1_source,\n function_t1_to_t2,\n make_test_env,\n)\n\n\ndef make_graph() -> Graph:\n env = make_test_env()\n env.add_module(core)\n g = Graph(env)\n g.create_node(key=\"node1\", function=function_t1_source)\n g.node(key=\"node2\", function=function_t1_source)\n g.node(key=\"node3\", function=function_t1_to_t2, input=\"node1\")\n g.node(key=\"node4\", function=function_t1_to_t2, input=\"node2\")\n g.node(key=\"node5\", function=function_generic, input=\"node4\")\n g.node(key=\"node6\", function=function_self, input=\"node4\")\n g.node(\n key=\"node7\",\n function=function_multiple_input,\n inputs={\"input\": \"node4\", \"other_t2\": \"node3\"},\n )\n return g\n\n\ndef test_dupe_node():\n g = make_graph()\n with pytest.raises(KeyError):\n g.create_node(key=\"node1\", function=function_t1_source)\n\n\ndef test_declared_graph():\n g = make_graph()\n n1 = g.get_node(\"node1\")\n n2 = g.get_node(\"node2\")\n n4 = g.get_node(\"node4\")\n n5 = g.get_node(\"node5\")\n assert len(list(g.all_nodes())) == 7\n assert g.get_all_upstream_dependencies_in_execution_order(n1) == [n1]\n assert g.get_all_upstream_dependencies_in_execution_order(n5) == [\n n2,\n n4,\n n5,\n ]\n\n\ndef test_make_graph():\n g = make_graph()\n nodes = {\n \"node1\",\n \"node2\",\n \"node3\",\n \"node4\",\n \"node5\",\n \"node6\",\n \"node7\",\n }\n assert set(n.key for n in g.all_nodes()) == nodes\n n3 = g.get_node(\"node3\")\n n7 = g.get_node(\"node7\")\n assert len(g.get_all_upstream_dependencies_in_execution_order(n3)) == 2\n assert len(g.get_all_upstream_dependencies_in_execution_order(n7)) == 5\n assert len(g.get_all_nodes_in_execution_order()) == len(nodes)\n execution_order = [n.key for n in g.get_all_nodes_in_execution_order()]\n expected_orderings = [\n [\n \"node2\",\n \"node4\",\n \"node5\",\n ],\n [\n \"node2\",\n \"node4\",\n \"node6\",\n ],\n [\n \"node1\",\n \"node3\",\n \"node7\",\n ],\n ]\n # TODO: graph sort not stable!\n for ordering in expected_orderings:\n for i, n in enumerate(ordering[:-1]):\n assert execution_order.index(n) < execution_order.index(ordering[i + 1])\n\n\ndef test_graph_from_yaml():\n g = graph_from_yaml(\n \"\"\"\n nodes:\n - key: stripe_charges\n function: stripe.extract_charges\n params:\n api_key: \"*****\"\n - key: accumulated_stripe_charges\n function: core.accumulator\n inputs:\n - stripe_charges\n - key: stripe_customer_lifetime_sales\n function: customer_lifetime_sales\n inputs:\n - accumulated_stripe_charges\n \"\"\"\n )\n assert len(list(g.all_nodes())) == 3\n assert g.get_node(\"stripe_charges\").params == {\"api_key\": \"*****\"}\n assert g.get_node(\"accumulated_stripe_charges\").inputs == \"stripe_charges\"\n assert (\n g.get_node(\"stripe_customer_lifetime_sales\").inputs\n == \"accumulated_stripe_charges\"\n )\n", "sub_path": "tests/test_graph.py", "file_name": "test_graph.py", "file_ext": "py", "file_size_in_byte": 3350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tests.utils.make_test_env", "line_number": 18, "usage_type": "call"}, {"api_name": "snapflow.modules.core", "line_number": 19, "usage_type": "argument"}, 
{"api_name": "snapflow.core.graph.Graph", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.utils.function_t1_source", "line_number": 21, "usage_type": "name"}, {"api_name": "tests.utils.function_t1_source", "line_number": 22, "usage_type": "name"}, {"api_name": "tests.utils.function_t1_to_t2", "line_number": 23, "usage_type": "name"}, {"api_name": "tests.utils.function_t1_to_t2", "line_number": 24, "usage_type": "name"}, {"api_name": "tests.utils.function_generic", "line_number": 25, "usage_type": "name"}, {"api_name": "tests.utils.function_self", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.utils.function_multiple_input", "line_number": 29, "usage_type": "name"}, {"api_name": "snapflow.core.graph.Graph", "line_number": 17, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 37, "usage_type": "call"}, {"api_name": "tests.utils.function_t1_source", "line_number": 38, "usage_type": "name"}, {"api_name": "snapflow.core.graph.graph_from_yaml", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "199036568", "text": "import random # to generating random numbers\nimport sys\nfrom typing import Mapping # to exit program\nimport pygame \nfrom pygame.locals import * # basic pygame import\n\n# Globle variables\nFPS = 200\nSCREENWIDTH = 289\nSCREENHEIGHT = 511\nSCREEN = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))\nBACKGROUND = 'background.jpg'\nCOCKPIT = 'cockpit.png'\nWELLCOME = 'WELLCOME.jpg'\nUFO = 'UFO.png'\nASTEROID = 'asteroid.png'\nGAME_SPRITES = {}\nGAME_SOUNDS = {}\ndef MainGame():\n while True:\n ASTOX_CHANGE = 0\n ASTOY_CHANGE = 0\n Asteroid = []\n newAsteroid = randAsteroid()\n Asteroid.append(newAsteroid[0])\n print(Asteroid)\n for event in pygame.event.get():\n # if user presses cross button,close the game\n if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n \n SCREEN.blit(GAME_SPRITES['background'],(0,0))\n i = 1\n while i < 600:\n SCREEN.blit(GAME_SPRITES['background2'],(0,0))\n ASTEROID2 = pygame.transform.scale(GAME_SPRITES['asteroid'], (2*i,2*i))\n SCREEN.blit(ASTEROID2,(Asteroid[0]['x']-(i),Asteroid[0]['y']-(i)))\n SCREEN.blit(GAME_SPRITES['COCKPIT'],(0,441))\n pygame.display.update()\n if 0 < Asteroid[0]['x'] < 5 or 284 < Asteroid[0]['x'] < 289:\n if len(Asteroid) < 3:\n Asteroid.append(newAsteroid)\n if 0 < Asteroid[0]['y'] < 5 or 506 < Asteroid[0]['x'] < 511:\n if len(Asteroid) < 3:\n Asteroid.append(newAsteroid)\n if Asteroid[0]['x'] < -GAME_SPRITES['asteroid'].get_width() or Asteroid[0]['x'] > (SCREENWIDTH + GAME_SPRITES['asteroid'].get_width()):\n Asteroid.pop(0)\n i = 100\n break\n if Asteroid[0]['y'] < -GAME_SPRITES['asteroid'].get_height() or Asteroid[0]['y'] > (SCREENHEIGHT + GAME_SPRITES['asteroid'].get_height()):\n Asteroid.pop(0)\n i = 100\n break\n else:\n i += 1\n for event in pygame.event.get():\n # if user presses cross button,close the game\n if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n ASTOX_CHANGE = 1.5\n if event.key == pygame.K_RIGHT:\n ASTOX_CHANGE = -1.5\n if event.key == pygame.K_UP:\n ASTOY_CHANGE = 1.5\n if event.key == pygame.K_DOWN:\n ASTOY_CHANGE = -1.5\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n ASTOY_CHANGE = 0\n ASTOX_CHANGE = 0\n Asteroid[0]['x'] += ASTOX_CHANGE\n Asteroid[0]['y'] += 
ASTOY_CHANGE\n FPSCLOCK.tick(FPS)\ndef wellcomescreen():\n while True:\n for event in pygame.event.get():\n # if user presses cross button,close the game\n if event.type == pygame.QUIT or (event.type==KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n elif event.type==KEYDOWN and (event.key==K_SPACE or event.key == K_UP):\n return\n else:\n SCREEN.blit(GAME_SPRITES['wellcome'],(0,0))\n pygame.display.update()\n FPSCLOCK.tick(FPS)\ndef randAsteroid():\n PLAYERX = random.randint(0,200)\n PLAYERY = random.randint(0,400)\n randAst = [\n {'x' : PLAYERX, 'y' : PLAYERY}\n ]\n return randAst\n\nif __name__ == \"__main__\":\n\n\n pygame.init() #initialize all pygame modules\n FPSCLOCK = pygame.time.Clock()\n pygame.display.set_caption('GAME by Mahendra')\n GAME_SPRITES['wellcome'] = pygame.image.load(WELLCOME).convert()\n GAME_SPRITES['background'] = pygame.image.load(BACKGROUND).convert()\n GAME_SPRITES['UFO'] = pygame.image.load(UFO).convert_alpha()\n GAME_SPRITES['asteroid'] = pygame.image.load(ASTEROID).convert_alpha()\n GAME_SPRITES['COCKPIT'] = pygame.image.load(COCKPIT).convert_alpha()\n GAME_SPRITES['background2'] = pygame.transform.scale(GAME_SPRITES['background'], (289,441))\n while True:\n wellcomescreen()\n MainGame()", "sub_path": "space game/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.event", 
"line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 89, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "518330170", "text": "import sys\nfrom PySide2 import QtWidgets, QtCore\n\nif __name__ == \"__main__\":\n qApp = QtWidgets.QApplication(sys.argv)\n\n mainWidget = QtWidgets.QWidget()\n mainWidget.setMinimumSize(800, 600)\n mainLayout = QtWidgets.QGridLayout(mainWidget)\n\n blue = QtWidgets.QWidget(mainWidget)\n blue.setStyleSheet(\"background-color: blue\")\n blue.setFixedSize(100, 100)\n edit = QtWidgets.QTextEdit(mainWidget)\n\n mainLayout.addWidget(edit, 0, 0)\n mainLayout.addWidget(blue, 0, 1)\n\n mainWidget.show()\n sys.exit(qApp.exec_())\n", "sub_path": "src/pyschool/pyside2/hw4b.py", "file_name": "hw4b.py", "file_ext": "py", "file_size_in_byte": 541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PySide2.QtWidgets.QApplication", "line_number": 5, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 5, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 7, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 7, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QGridLayout", "line_number": 9, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 11, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QTextEdit", "line_number": 14, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 14, "usage_type": "name"}, 
{"api_name": "sys.exit", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "259227759", "text": "'''\nGoogle DeepDream Local API\nLei Mao\n9/17/2017\nDepartment of Computer Science\nUniversity of Chicago\nDeveloped and Tested in TensorFlow 1.3 and Python 3.6\n'''\n\nimport argparse\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description = 'Designate function and keywords')\n group = parser.add_mutually_exclusive_group()\n\n # List the available layers and the number of channels\n # No input required\n group.add_argument('-l','--list', action = 'store_true', \n help = 'List the available layers and the number of channels')\n # Preview the feature pattern of the neural network\n # Inputs: layer name, channel number\n group.add_argument('-p','--preview', nargs = 3, metavar=('layer_name', 'channel_number', 'output_filename'), \n help = 'Preview the feature pattern of the neural network')\n # Preview the feature pattern of the neural network with Laplacian Pyramid Gradient Normalization\n # Inputs: layer name, channel number\n group.add_argument('-pl','--previewlap', nargs = 3, metavar=('layer_name', 'channel_number', 'output_filename'), \n help = 'Preview the feature pattern of the neural network with Laplacian Pyramid Gradient Normalization')\n # Render the image with the features from the neural network\n # Inputs: image path, layer name, channel number\n group.add_argument('-r', '--render', nargs = 4, metavar=('image_path', 'layer_name', 'channel_number', 'output_filename'), \n help = 'Render the image with the features from the neural network')\n # Render the image with the features from the neural network with Laplacian Pyramid Gradient Normalization\n # Inputs: image path, layer name, channel number \n group.add_argument('-rl', '--renderlap', nargs = 4, metavar=('image_path', 'layer_name', 'channel_number', 'output_filename'), \n help = 'Render the image with the features from the neural network with Laplacian Pyramid Gradient Normalization')\n # Customize the image with the features from guide images\n # Inputs: image path, guide image path, layer name, channel number\n # This function is currently unavailable\n # group.add_argument('-c','--customize', nargs = 4, metavar=('image_path', 'guide_image_path', 'layer_name', 'channel_number'), \n # help = 'Customize the image with the features from guide images')\n args = parser.parse_args()\n\n if args.list:\n\n from deepdream import deepdream\n dream = deepdream()\n dream.show_layers()\n\n if args.preview:\n layer = str(args.preview[0])\n channel = int(args.preview[1])\n output_filename = str(args.preview[2])\n\n from deepdream import deepdream\n dream = deepdream()\n dream_obj = dream.T(layer = layer)[:,:,:,channel]\n dream.render_naive(t_obj = dream_obj, output_filename = output_filename)\n\n if args.previewlap:\n layer = str(args.previewlap[0])\n channel = int(args.previewlap[1])\n output_filename = str(args.previewlap[2])\n\n from deepdream import deepdream\n dream = deepdream()\n dream_obj = dream.T(layer = layer)[:,:,:,channel]\n dream.render_lapnorm(t_obj = dream_obj, output_filename = output_filename, octave_n = 1, iter_n = 30)\n\n if args.render:\n\n image_path = str(args.render[0])\n layer = str(args.render[1])\n channel = int(args.render[2])\n output_filename = str(args.render[3])\n\n from deepdream import deepdream\n import numpy as np\n from PIL import Image\n dream = deepdream()\n img0 = Image.open(image_path)\n img0 = np.float32(img0)\n dream_obj = dream.T(layer = layer)[:,:,:,channel]\n 
dream.render_deepdream(t_obj = dream_obj, img0 = img0, output_filename = output_filename, iter_n = 10)\n\n if args.renderlap:\n\n image_path = str(args.renderlap[0])\n layer = str(args.renderlap[1])\n channel = int(args.renderlap[2])\n output_filename = str(args.renderlap[3])\n\n from deepdream import deepdream\n import numpy as np\n from PIL import Image\n dream = deepdream()\n img0 = Image.open(image_path)\n img0 = np.float32(img0)\n dream_obj = dream.T(layer = layer)[:,:,:,channel]\n dream.render_deepdream_lapnorm(t_obj = dream_obj, img0 = img0, output_filename = output_filename, iter_n = 10)\n\n\nif __name__ == '__main__':\n\n main()", "sub_path": "deepdream_api.py", "file_name": "deepdream_api.py", "file_ext": "py", "file_size_in_byte": 4384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "deepdream.deepdream", "line_number": 48, "usage_type": "call"}, {"api_name": "deepdream.deepdream", "line_number": 57, "usage_type": "call"}, {"api_name": "deepdream.deepdream", "line_number": 67, "usage_type": "call"}, {"api_name": "deepdream.deepdream", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 83, "usage_type": "call"}, {"api_name": "deepdream.deepdream", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 98, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "114780944", "text": "import csv\r\nimport numpy as np\r\n# import matplotlib.pyplot as plt\r\nfrom sklearn import datasets, linear_model\r\n\r\nwith open('1-prostate-training-data.csv') as csvfile:\r\n datatrainreader = csv.reader(csvfile)\r\n next(datatrainreader)\r\n data_str = np.array(list(datatrainreader))\r\n data = data_str.astype(np.float)\r\n X = np.array(data[:, range(data.shape[1] - 1)])\r\n y = np.array(data[:, [data.shape[1] - 1]])\r\n one_matrix = np.array(np.ones((X.shape[0], 1)))\r\n A = np.concatenate((one_matrix, X), axis = 1)\r\n\r\n # fit the model by Linear Regression\r\n regr = linear_model.LinearRegression(fit_intercept=False)\r\n # fit_intercept = False for calculating the bias\r\n regr.fit(A, y)\r\n w = np.array(list(regr.coef_))\r\n # print( 'Solution found by scikit-learn : ', w )\r\n\r\n #Test Result\r\n with open('20144052.csv') as csvtest:\r\n datatestreader = csv.reader(csvtest)\r\n datatest_str = np.array(list(datatestreader))\r\n datatest = datatest_str.astype(np.float)\r\n X_test = np.array(datatest[:, range(datatest.shape[1] - 1)])\r\n one_matrix = np.array(np.ones((X_test.shape[0], 1)))\r\n A_test = np.concatenate((one_matrix, X_test), axis = 1)\r\n y_test = np.dot(A_test, w.T)\r\n print('result: ', y_test)\r\n", "sub_path": "data/final_responses/IT4866_20144052/IT4866_20144052.py", "file_name": "IT4866_20144052.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "csv.reader", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.array", 
"line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "242144057", "text": "from matplotlib import pyplot\r\nfrom clases import *\r\n\r\ndef graficoconsumo__(self):\r\n datos = ('Filamento','Electricidad','Depreciacion de la impresora','Preparacion','Post-procesing','Consumibles')\r\n slices = (self.cost_filamento(),self.cost_electricidad(),self.depreciacion_impresora(),self.preparacion(),self.post_proces(),self.material_consumido())\r\n colores = ('red','blue','green','pink','yellow','orange')\r\n\r\n pyplot.pie(slices, colors=colores, autopct=\"%1.1f%%\")\r\n pyplot.axis(\"equal\")\r\n pyplot.title(\"costo de frabricación\")\r\n pyplot.legend(labels=datos)\r\n pyplot.show()\r\n\r\ndef graficotendensia__(self):\r\n datos = ('Tiempo de uso de Impresora','Energia consumida','tiempo de proceso consumido')\r\n slices = (self.imp_timeImpresion,self.opcion_imp_consumo,self.opcion_imp_timeDepre)\r\n\r\n pyplot.plot(datos,slices)\r\n pyplot.title(\"Analisis de tendencias en los servicios\")\r\n pyplot.legend(labels=datos)\r\n pyplot.show()\r\n\r\n", "sub_path": "prueba/graficos.py", "file_name": "graficos.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.pie", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 21, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "366967641", "text": "import torch\nfrom collections import OrderedDict\nfrom torch.optim import Adam, SGD\nfrom model_TCN import Model, StaticModel\nimport torchvision.utils as vutils\nimport torch.nn.functional as F\nimport os\nimport numpy as np\nimport cv2\nimport torch.nn as nn\n\n\nEPSILON = 1e-8\np = OrderedDict()\n\np['lr_bone'] = 5e-5 # Learning rate\np['lr_branch'] = 0.025\np['wd'] = 0.0005 # Weight decay\np['momentum'] = 0.90 # Momentum\nlr_decay_epoch = [9, 20]\nnAveGrad = 10 # Update the weights once in 'nAveGrad' forward passes\nshowEvery = 50\ntmp_path = 'tmp_out'\n\nclass Solver(object):\n def __init__(self, train_loader, test_loader, config, save_fold=None):\n\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.config = config\n self.save_fold = save_fold\n\n self.build_model()\n\n # if config.mode == 'test':\n # self.net_bone.eval()\n\n def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel() # 返回一个tensor变量内所有元素个数\n print(name)\n print(model)\n print(\"The number of parameters: {}\".format(num_params))\n\n # build the network\n def build_model(self):\n print('mode: {}'.format(self.config.mode))\n print('------------------------------------------')\n if self.config.train_step == 1:\n self.net_bone = StaticModel(3)\n else:\n self.net_bone = Model(3)\n if self.config.cuda:\n self.net_bone = self.net_bone.cuda()\n\n if self.config.mode == 'train':\n if self.config.model_path != '':\n assert (os.path.exists(self.config.model_path)), ('please import correct pretrained model path!')\n self.net_bone.load_pretrain_model(self.config.model_path)\n if self.config.static_path != '':\n assert (os.path.exists(self.config.static_path)), ('please import correct pretrained model path!')\n self.net_bone.features.load_pretrain_model(self.config.static_path)\n else:\n assert (self.config.model_path != ''), ('Test mode, please import pretrained model path!')\n assert (os.path.exists(self.config.model_path)), ('please import correct pretrained model path!')\n self.net_bone.load_pretrain_model(self.config.model_path)\n\n self.lr_bone = p['lr_bone']\n self.lr_branch = p['lr_branch']\n self.optimizer_bone = Adam(filter(lambda p: p.requires_grad, self.net_bone.parameters()), lr=self.lr_bone,\n weight_decay=p['wd'])\n print('------------------------------------------')\n self.print_network(self.net_bone, 'DSNet')\n print('------------------------------------------')\n\n def test(self):\n kk = {}\n rr = {}\n\n if not os.path.exists(self.save_fold):\n os.makedirs(self.save_fold)\n for i, data_batch in enumerate(self.test_loader):\n frame1, frame2, frame3, frame4, frame5, label, split, size, name = data_batch['frame1'], data_batch['frame2'], data_batch['frame3'], data_batch['frame4'], data_batch['frame5'], data_batch['label'], data_batch['split'], data_batch['size'], data_batch['name']\n dataset = data_batch['dataset']\n\n if self.config.cuda:\n frame1, frame2, frame3, frame4, frame5 = frame1.cuda(), frame2.cuda(), frame3.cuda(), frame4.cuda(), frame5.cuda()\n with torch.no_grad():\n\n pre = self.net_bone(frame1, frame2, frame3, frame4, frame5)\n\n for i in range(self.config.test_batch_size):\n\n presavefold = os.path.join(self.save_fold, dataset[i], split[i])\n\n if not 
os.path.exists(presavefold):\n os.makedirs(presavefold)\n pre1 = torch.nn.Sigmoid()(pre[i])\n pre1 = (pre1 - torch.min(pre1)) / (torch.max(pre1) - torch.min(pre1))\n pre1 = np.squeeze(pre1.cpu().data.numpy()) * 255\n pre1 = cv2.resize(pre1, (size[0][1], size[0][0]))\n cv2.imwrite(presavefold + '/' + name[i], pre1)\n\n\n def train(self):\n\n # 一个epoch中训练iter_num个batch\n iter_num = len(self.train_loader.dataset) // self.config.batch_size\n aveGrad = 0\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n for epoch in range(self.config.epoch):\n r_sum_loss= 0\n self.net_bone.zero_grad()\n for i, data_batch in enumerate(self.train_loader):\n\n frame1, frame2, frame3, frame4, frame5, label = data_batch['frame1'], data_batch['frame2'], data_batch['frame3'], data_batch['frame4'], data_batch['frame5'],data_batch['label']\n if frame3.size()[2:] != label.size()[2:]:\n print(\"Skip this batch\")\n continue\n if self.config.cuda:\n frame1, frame2, frame3, frame4, frame5, label = frame1.cuda(), frame2.cuda(), frame3.cuda(), frame4.cuda(), frame5.cuda(), label.cuda()\n\n if self.config.train_step == 1:\n pre1 = self.net_bone(frame1)\n else:\n\n pre1 = self.net_bone(frame1, frame2, frame3, frame4, frame5)\n bce = nn.BCEWithLogitsLoss()\n # g = gloss()\n b1 = bce(pre1, label)\n # g1 = g(pre1, label)\n\n loss = b1\n loss.backward()\n aveGrad += 1\n\n if aveGrad % nAveGrad == 0:\n self.optimizer_bone.step()\n self.optimizer_bone.zero_grad()\n aveGrad = 0\n\n if i % showEvery == 0:\n print('epoch: [%2d/%2d], iter: [%5d/%5d] Loss || sal : %10.4f' % (\n epoch, self.config.epoch, i, iter_num,\n loss) )\n print('Learning rate: ' + str(self.lr_bone))\n\n if i % 50 == 0:\n vutils.save_image(torch.sigmoid(pre1.data), tmp_path + '/iter%d-sal-0.jpg' % i,\n normalize=True, padding=0)\n # vutils.save_image(torch.sigmoid(edge_out.data), tmp_path + '/iter%d-edge-0.jpg' % i,\n # normalize=True, padding=0)\n vutils.save_image(frame2.data, tmp_path + '/iter%d-sal-data.jpg' % i, padding=0)\n vutils.save_image(label.data, tmp_path + '/iter%d-sal-target.jpg' % i, padding=0)\n\n if (epoch + 1) % self.config.epoch_save == 0:\n torch.save(self.net_bone.state_dict(),\n '%s/epoch_%d_bone.pth' % (self.config.save_fold, epoch + 1))\n\n if epoch in lr_decay_epoch:\n self.lr_bone = self.lr_bone * 0.2\n self.optimizer_bone = Adam(filter(lambda p: p.requires_grad, self.net_bone.parameters()),\n lr=self.lr_bone, weight_decay=p['wd'])\n\n torch.save(self.net_bone.state_dict(), '%s/models/final_bone.pth' % self.config.save_fold)\n\n\ndef gradient(x):\n # tf.image.image_gradients(image)\n h_x = x.size()[-2]\n w_x = x.size()[-1]\n # gradient step=1\n r = F.pad(x, [0, 1, 0, 0])[:, :, :, 1:]\n l = F.pad(x, [1, 0, 0, 0])[:, :, :, :w_x]\n t = F.pad(x, [0, 0, 1, 0])[:, :, :h_x, :]\n b = F.pad(x, [0, 0, 0, 1])[:, :, 1:, :]\n\n xgrad = torch.pow(torch.pow((r - l) * 0.5, 2) + torch.pow((t - b) * 0.5, 2), 0.5)\n\n return xgrad\n\nclass gloss(nn.Module):\n def __init__(self):\n super(gloss, self).__init__()\n\n def forward(self, x, gt):\n x_grad = gradient(x)\n gt_grad = gradient(gt)\n edge = torch.where(gt_grad>0, torch.ones_like(gt), torch.zeros_like(gt))\n gg = (1 - edge) * gt\n mask = torch.where(gg > 0, x_grad, torch.zeros_like(gt))\n l1 = torch.mean(mask)\n\n maske = torch.where(edge>0, x_grad, torch.zeros_like(gt))\n l2 = torch.exp(-torch.mean(maske))\n loss = l1*l2\n return loss\n\n\n", "sub_path": "solver.py", "file_name": "solver.py", "file_ext": "py", "file_size_in_byte": 8127, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.OrderedDict", "line_number": 14, "usage_type": "call"}, {"api_name": "model_TCN.StaticModel", "line_number": 51, "usage_type": "call"}, {"api_name": "model_TCN.Model", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.min", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 151, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 151, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 155, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 155, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 156, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn.functional.pad", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.nn.functional.pad", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.functional.pad", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 177, 
"usage_type": "name"}, {"api_name": "torch.nn.functional.pad", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.pow", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.where", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "81934650", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=50)),\n ('content', models.TextField()),\n ('keyword', models.CharField(max_length=20)),\n ('readed_count', models.IntegerField()),\n ('comment_count', models.IntegerField()),\n ('create_date', models.DateTimeField(auto_now_add=True)),\n ('update_date', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Article_type',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('article_type', models.CharField(max_length=15)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment_content', models.TextField(max_length=100)),\n ('create_date', models.DateTimeField(auto_now_add=True)),\n ('comment_article', models.ForeignKey(to='article.Article')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Reply',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('reply_content', models.TextField(max_length=100)),\n ('create_date', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Users',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('username', models.CharField(max_length=20)),\n ('icon', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='reply',\n name='create_by_user',\n field=models.ForeignKey(to='article.Users'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='reply',\n name='reply_comment',\n field=models.ForeignKey(to='article.Comment'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='created_by_user',\n 
field=models.ForeignKey(to='article.Users'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='article_type',\n field=models.ForeignKey(to='article.Article_type'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "article/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 3469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, 
"usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "197265171", "text": "from __future__ import absolute_import, unicode_literals\n\nimport environ\nfrom django.utils.translation import ugettext_lazy as _\nfrom os.path import basename\n\nSETTINGS_DIR = environ.Path(__file__) - 1\nDJANGO_ROOT = 
environ.Path(__file__) - 2\nPROJECT_ROOT = environ.Path(__file__) - 3\nPROJECT_NAME = basename(str(DJANGO_ROOT))\nAPPS_DIR = PROJECT_ROOT.path('apps')\nPROJECT_TEMPLATES = [\n str(PROJECT_ROOT.path('templates')),\n]\n\nenv = environ.Env()\nSECRET_FILE = str(PROJECT_ROOT.path('security/SECRET.key'))\ntry:\n SECRET_KEY = open(SECRET_FILE).read().strip()\nexcept IOError:\n try:\n from django.utils.crypto import get_random_string\n\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!$%&()=+-_'\n SECRET_KEY = get_random_string(50, chars)\n with open(SECRET_FILE, 'w') as f:\n f.write(SECRET_KEY)\n except IOError:\n raise Exception('Could not open %s for writing!' % SECRET_FILE)\n\nDEBUG = env.bool('DEBUG', False)\n\nALLOWED_HOSTS = []\n\nADMINS = (\n ('jonathan', 'jony327@gmail.com'),\n)\nMANAGERS = ADMINS\n\nDJANGO_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n)\n\nTHIRD_PARTY_APPS = (\n 'django_extensions',\n 'rest_framework',\n 'django_sites',\n)\n\nLOCAL_APPS = (\n 'core',\n 'apps.menu',\n 'apps.customer',\n 'apps.taxonomy',\n 'apps.container',\n 'apps.dashboard',\n)\n\nINSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'core.middleware.thread_user.CuserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nWSGI_APPLICATION = '%s.wsgi.application' % PROJECT_NAME\n\nROOT_URLCONF = '%s.urls' % PROJECT_NAME\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': PROJECT_TEMPLATES,\n 'OPTIONS': {\n 'debug': DEBUG,\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n },\n },\n]\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nLANGUAGES = (\n\n ('es', _('Spanish')),\n ('en', _('English')),\n\n)\n\nLANGUAGE_CODE = 'en-us'\n\nLOCALE_PATHS = (\n str(PROJECT_ROOT.path('locale')),\n)\n\nTIME_ZONE = 'America/Lima'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\nSTATIC_URL = '/static/'\n\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = str(PROJECT_ROOT.path('run/static'))\nMEDIA_ROOT = str(PROJECT_ROOT.path('run/media'))\n\nSTATICFILES_DIRS = [\n str(PROJECT_ROOT.path('static')),\n]\n\nSTATICFILES_FINDERS = (\n 
'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nAUTH_USER_MODEL = 'customer.User'\nLOGIN_URL = '/'\nLOGIN_REDIRECT_URL = '/'\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'complete': {\n 'format': '%(levelname)s:%(asctime)s:%(module)s %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s:%(asctime)s: %(message)s'\n },\n 'null': {\n 'format': '%(message)s',\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['null'],\n 'propagate': True,\n 'level': 'INFO',\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'console'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCSRF_COOKIE_HTTPONLY = False\nSESSION_COOKIE_HTTPONLY = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n", "sub_path": "config/settings/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 5737, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "environ.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "environ.Path", "line_number": 8, "usage_type": "call"}, {"api_name": "environ.Path", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 10, "usage_type": "call"}, {"api_name": "environ.Env", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.crypto.get_random_string", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 124, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "17163899", "text": "import logging\nimport os\n\nfrom antlr4.TokenStreamRewriter import TokenStreamRewriter\n\nfrom gen.javaLabeled.JavaParserLabeled import JavaParserLabeled\nfrom gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener\nfrom refactorings.utils.utils2 import parse_and_walk\n\ntry:\n import understand as und\nexcept ImportError as e:\n print(e)\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__file__)\n\nROOT_PACKAGE = \"(Unnamed_Package)\"\n\n\nclass DeleteSourceClassListener(JavaParserLabeledListener):\n def __init__(self, rewriter: TokenStreamRewriter, class_name: str):\n self.rewriter = rewriter\n self.class_name = class_name\n\n def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):\n if ctx.IDENTIFIER().getText() == self.class_name:\n self.rewriter.delete(\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME,\n from_idx=ctx.parentCtx.start.tokenIndex,\n to_idx=ctx.parentCtx.stop.tokenIndex\n )\n\n\nclass UpdateImportsListener(JavaParserLabeledListener):\n def __init__(self, rewriter: TokenStreamRewriter, source_package: str, target_package: str, class_name: str):\n self.rewriter = rewriter\n self.source_package = source_package\n self.target_package = 
target_package\n self.class_name = class_name\n self.current_package = None\n\n self.imported = False\n self.import_loc = None\n\n def enterPackageDeclaration(self, ctx: JavaParserLabeled.PackageDeclarationContext):\n self.current_package = ctx.qualifiedName().getText()\n\n def exitPackageDeclaration(self, ctx:JavaParserLabeled.PackageDeclarationContext):\n self.import_loc = ctx.stop\n\n def enterImportDeclaration(self, ctx: JavaParserLabeled.ImportDeclarationContext):\n if self.target_package in ctx.getText():\n self.imported = True\n if self.class_name in ctx.getText():\n if self.target_package == self.current_package:\n replace_text = \"\"\n else:\n replace_text = f\"import {self.target_package}.{self.class_name};\\n\"\n\n self.rewriter.replaceRangeTokens(\n from_token=ctx.start,\n to_token=ctx.stop,\n text=replace_text,\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME\n )\n\n def exitCompilationUnit(self, ctx:JavaParserLabeled.CompilationUnitContext):\n if not self.imported:\n self.rewriter.insertAfterToken(\n token=self.import_loc,\n text=f\"\\nimport {self.target_package}.{self.class_name};\\n\",\n program_name=self.rewriter.DEFAULT_PROGRAM_NAME\n )\n\n\nclass MoveClassAPI:\n def __init__(self, udb_path: str, source_package: str, target_package: str, class_name: str):\n self.udb_path = udb_path\n self.source_package = source_package\n self.target_package = target_package\n self.class_name = class_name\n\n self.source_package_dir = None\n self.target_package_dir = None\n self.class_dir = None\n self.class_content = None\n self.usages = None\n self.new_class_path = None\n\n def check_preconditions(self) -> bool:\n if self.source_package == self.target_package:\n logger.error(\"Source and target packages are same.\")\n return False\n\n if self.source_package == ROOT_PACKAGE or self.target_package == ROOT_PACKAGE:\n logger.error(\"Can not move package to/from root package.\")\n return False\n\n # Get package directories\n source_package_dir, target_package_dir = self.get_package_directories()\n if source_package_dir is None or target_package_dir is None:\n logger.error(\"Package entity does not exists.\")\n return False\n\n if not os.path.exists(os.path.join(source_package_dir, f\"{self.class_name}.java\")):\n logger.error(\"Class does not exists in source package.\")\n return False\n\n # Get class directory\n class_dir, class_content, usages = self.get_class_info()\n if class_dir is None or class_content is None:\n logger.error(\"Class entity does not exists.\")\n return False\n\n new_class_path = os.path.join(target_package_dir, f\"{self.class_name}.java\")\n if os.path.exists(new_class_path):\n logger.error(\"Class already exists in target package.\")\n return False\n\n self.source_package_dir = source_package_dir\n self.target_package_dir = target_package_dir\n self.class_dir = class_dir\n self.class_content = class_content\n self.usages = usages\n self.new_class_path = new_class_path\n\n return True\n\n def get_package_directories(self):\n db = und.open(self.udb_path)\n sp = None\n tp = None\n for ent in db.ents(\"Package\"):\n long_name = ent.longname()\n if long_name == self.source_package and sp is None:\n sp = os.path.dirname(ent.parent().longname())\n if long_name == self.target_package and tp is None:\n tp = os.path.dirname(ent.parent().longname())\n db.close()\n return sp, tp\n\n def get_class_info(self):\n db = und.open(self.udb_path)\n class_path = None\n class_contents = None\n usages = set()\n\n for ent in db.ents(\"Class\"):\n simple_name = ent.simplename()\n if 
simple_name == self.class_name and class_path is None:\n class_contents = ent.contents()\n class_path = ent.parent().longname()\n\n for ref in ent.refs():\n if ref.file().simplename() != f\"{simple_name}.java\":\n usages.add(ref.file().longname())\n break\n db.close()\n return class_path, class_contents, usages\n\n def do_refactor(self):\n if not self.check_preconditions():\n logger.error(\"Pre conditions failed.\")\n return False\n print(self.usages)\n # Update usages\n for file_path in self.usages:\n parse_and_walk(\n file_path=file_path,\n listener_class=UpdateImportsListener,\n has_write=True,\n source_package=self.source_package,\n target_package=self.target_package,\n class_name=self.class_name\n )\n\n # Delete source class\n os.remove(self.class_dir)\n\n # Write the new class\n with open(self.new_class_path, 'w') as f:\n package = \"\"\n if self.target_package != ROOT_PACKAGE:\n package = f\"package {self.target_package};\\n\\n\"\n imports = \"\"\n if self.source_package != ROOT_PACKAGE:\n imports = f\"import {self.source_package}.*;\\n\\n\"\n\n f.write(package + imports + self.class_content)\n\n return True\n\n\ndef main(udb_path: str, source_package: str, target_package: str, class_name: str):\n return MoveClassAPI(\n udb_path, source_package, target_package, class_name\n ).do_refactor()\n\n\nif __name__ == '__main__':\n main(\n udb_path=\"D:\\Dev\\JavaSample\\JavaSample1.udb\",\n class_name=\"RemoveFlagArg\",\n source_package=\"my_package\",\n target_package=\"your_package\", # \"(Unnamed_Package)\"\n )\n", "sub_path": "refactorings/move_class.py", "file_name": "move_class.py", "file_ext": "py", "file_size_in_byte": 7464, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "gen.javaLabeled.JavaParserLabeledListener.JavaParserLabeledListener", "line_number": 21, "usage_type": "name"}, {"api_name": "antlr4.TokenStreamRewriter.TokenStreamRewriter", "line_number": 22, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.ClassDeclarationContext", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled", "line_number": 26, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeledListener.JavaParserLabeledListener", "line_number": 35, "usage_type": "name"}, {"api_name": "antlr4.TokenStreamRewriter.TokenStreamRewriter", "line_number": 36, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.PackageDeclarationContext", "line_number": 46, "usage_type": "attribute"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled", "line_number": 46, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.PackageDeclarationContext", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled", "line_number": 49, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.ImportDeclarationContext", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled", "line_number": 52, "usage_type": "name"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled.CompilationUnitContext", 
"line_number": 68, "usage_type": "attribute"}, {"api_name": "gen.javaLabeled.JavaParserLabeled.JavaParserLabeled", "line_number": 68, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "understand.open", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "understand.open", "line_number": 144, "usage_type": "call"}, {"api_name": "refactorings.utils.utils2.parse_and_walk", "line_number": 169, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "617991203", "text": "__author__ = 'Arpit'\n\nimport unittest\nimport json\nfrom Airline.src.Graph import Graph\nfrom Airline.src.UIConsole import UIConsole\n\n\nclass Test_graph(unittest.TestCase):\n\n #This function is run before every test case is executed\n #def setUp(self):\n\n #The parser function\n def parse_file(self, json_file):\n with open(json_file) as map_data:\n data = json.load(map_data)\n return data\n\n #Testing creating a vertex. Each metro in the JSON file should get created as a vertex of the graph\n def test_create_vertices(self, json_file):\n data = self.parse_file(\"map_data_json\")\n graph=Graph()\n graph.create_vertices(data)\n print(graph.cityCodeList)\n list = ['SCL', 'LIM', 'MEX', 'BOG', 'BUE', 'SAO', 'LOS', 'FIH', 'JNB', 'KRT', 'CAI', 'ALG', 'MAD', 'LON', 'PAR', 'MIL', 'ESS', 'LED', 'MOW', 'IST', 'BGW', 'THR', 'RUH', 'KHI', 'DEL', 'BOM', 'MAA', 'CCU', 'BKK', 'HKG', 'SHA', 'PEK', 'ICN', 'TYO', 'OSA', 'TPE', 'MNL', 'SGN', 'JKT', 'SYD', 'LAX', 'SFO', 'CHI', 'ATL', 'MIA', 'WAS', 'NYC', 'YYZ', 'CMI']\n self.assertEquals(list, graph.cityCodeList)\n\n #Testing whether the longest flight distance is the same as 12051\n def test_longest_flight(self):\n data = self.parse_file('map_data.json')\n graph = Graph()\n graph.create_vertices(data)\n graph.create_edges(data)\n self.assertEquals(12051, graph.longest_flight(data))\n\n #Testing whether the shortest flight duration is the same as 334\n def test_shortest_flight(self):\n data = self.parse_file('map_data.json')\n graph = Graph()\n graph.create_vertices(data)\n graph.create_edges(data)\n self.assertEquals(334, graph.shortest_flight(data))\n\n #Testing whether the Average distance covered by all CSAir flights is 2300.276595744681\n def test_average_distance(self):\n data = self.parse_file('map_data.json')\n gObj = Graph()\n gObj.create_vertices(data)\n gObj.create_edges(data)\n self.assertEquals(2300.276595744681, gObj.average_distance(data))\n\n #Testing whether the biggest city population is equal to 34000000\n def test_biggest_city(self):\n data = self.parse_file('map_data.json')\n graph = Graph()\n graph.create_vertices(data)\n graph.create_edges(data)\n self.assertEquals(34000000, graph.biggest_city(data))\n\n #Testing whether the smallest city 
population is equal to 589900\n def test_smallest_city(self):\n data = self.parse_file('map_data.json')\n graph = Graph()\n graph.create_vertices(data)\n graph.create_edges(data)\n self.assertEquals(589900, graph.smallest_city(data))\n\n #The average city size is equal to 11796143.75\n def test_average_city_size(self):\n data = self.parse_file('map_data.json')\n graph = Graph()\n graph.create_vertices(data)\n graph.create_edges(data)\n self.assertEquals(11796143.75, graph.shortest_flight(data))\n\n #def test_csAir_continents(self):\n # self.fail()\n\n #def test_find_city(self):\n # self.fail()\n\n if __name__ == '__main__':\n unittest.main()", "sub_path": "src/Test_graph.py", "file_name": "Test_graph.py", "file_ext": "py", "file_size_in_byte": 3161, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 23, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 32, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 40, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 48, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 56, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 64, "usage_type": "call"}, {"api_name": "Airline.src.Graph.Graph", "line_number": 72, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "123895773", "text": "__author__ = 'Dryfire117'\n\nimport socket\nimport pyaudio\nfrom threading import Thread\nimport time\n\nSEND_IP = '' # Where is the packet forwarding server located?\nGET_IP = '' # Receive voice at localhost which is usually 127.0.0.1 - Leave this alone\nRECEIVE_PORT = 50005\nSEND_PORT = 50006\n\n# Variables for Pyaudio streams\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 10240\n\n\nclass Send_Voice(Thread):\n def __init__(self):\n Thread.__init__(self)\n\n def run(self):\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n\n while True:\n data = stream.read(CHUNK)\n sock.sendto(data, (SEND_IP, SEND_PORT))\n\n\nclass Get_Voice(Thread):\n def __init__(self):\n Thread.__init__(self)\n self.p = None\n self.sock = None\n self.stream = None\n\n def run(self):\n\n self.p = pyaudio.PyAudio()\n\n self.stream = self.p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n output=True)\n\n self.sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n\n self.sock.bind((GET_IP, RECEIVE_PORT))\n\n while True:\n data, addr = self.sock.recvfrom(CHUNK * CHANNELS * 2)\n self.stream.write(data)\n\n\nif __name__ == '__main__':\n\n SEND_IP = raw_input('Please enter the IP: ')\n\n clientThread = Send_Voice()\n serverThread = Get_Voice()\n\n serverThread.start()\n clientThread.start()\n\n clientThread.join()\n serverThread.join()", "sub_path": "Client.py", "file_name": "Client.py", "file_ext": "py", "file_size_in_byte": 1892, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyaudio.paInt16", "line_number": 15, "usage_type": "attribute"}, {"api_name": "threading.Thread", 
"line_number": 20, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 22, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "name"}, {"api_name": "pyaudio.PyAudio", "line_number": 26, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 35, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 42, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 44, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 44, "usage_type": "name"}, {"api_name": "pyaudio.PyAudio", "line_number": 51, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 58, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 58, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "176074410", "text": "# -*- coding: utf-8 -*-\n\"\"\"Isotherm plotting.\"\"\"\nfrom io import StringIO\nimport panel as pn\nimport bokeh.models as bmd\nfrom bokeh.plotting import figure\nfrom .submission import Submissions, Isotherm\n\nTOOLS = ['pan', 'wheel_zoom', 'box_zoom', 'reset', 'save']\n\n\ndef get_bokeh_plot(isotherm_dict, pressure_scale='linear'):\n \"\"\"Plot isotherm using bokeh.\n\n :returns: bokeh Figure instance\n \"\"\"\n p = figure(tools=TOOLS, x_axis_type=pressure_scale) # pylint: disable=invalid-name\n\n pressures = [point['pressure'] for point in isotherm_dict['isotherm_data']]\n\n for i in range(len(isotherm_dict['adsorbates'])):\n adsorbate = isotherm_dict['adsorbates'][i]\n adsorption = [point['species_data'][i]['adsorption'] for point in isotherm_dict['isotherm_data']]\n\n data = bmd.ColumnDataSource(data=dict(index=range(len(pressures)), pressure=pressures, adsorption=adsorption))\n\n p.line( # pylint: disable=too-many-function-args\n 'pressure',\n 'adsorption',\n source=data,\n legend_label=adsorbate['name'])\n p.circle( # pylint: disable=too-many-function-args\n 'pressure',\n 'adsorption',\n source=data,\n legend_label=adsorbate['name'])\n\n # update labels\n p.xaxis.axis_label = 'Pressure [{}]'.format(isotherm_dict['pressureUnits'])\n p.yaxis.axis_label = 'Adsorption [{}]'.format(isotherm_dict['adsorptionUnits'])\n\n tooltips = [(p.xaxis.axis_label, '@pressure'), (p.yaxis.axis_label, '@adsorption')]\n hover = bmd.HoverTool(tooltips=tooltips)\n p.tools.pop()\n p.tools.append(hover)\n\n return p\n\n\nclass IsothermPlot():\n \"\"\"Plot of isotherm data for consistency check.\n \"\"\"\n def __init__(self, isotherm_dict=None, figure_image=None):\n \"\"\"Create plot of isotherm data for consistency check.\n\n :param isotherm_dict: Isotherm dictionary (optional).\n \"\"\"\n self.row = pn.Row(figure(tools=TOOLS))\n self._isotherm_dict = isotherm_dict\n self._figure_image = figure_image\n\n self.btn_download = pn.widgets.FileDownload(filename='data.json',\n button_type='primary',\n callback=self.on_click_download)\n self.btn_download.data = '' # bug in panel https://github.com/holoviz/panel/issues/1598\n self.btn_add = pn.widgets.Button(name='Add to submission', button_type='primary')\n self.btn_add.on_click(self.on_click_add)\n\n self.inp_pressure_scale = pn.widgets.RadioButtonGroup(name='Pressure scale', options=['linear', 'log'])\n self.inp_pressure_scale.param.watch(self.on_click_set_scale, 'value')\n\n self.submissions = 
Submissions()\n\n def update(self, isotherm_dict, figure_image=None):\n \"\"\"Update isotherm plot with provided data.\n\n Updates figure as well as internal data representation.\n\n :param isotherm_dict: Dictionary with parsed isotherm data.\n :param figure_image: Byte stream with figure snapshot\n \"\"\"\n self._isotherm_dict = isotherm_dict\n self._figure_image = figure_image\n self.row[0] = get_bokeh_plot(isotherm_dict)\n\n @property\n def isotherm(self):\n \"\"\"Return Isotherm() instance.\"\"\"\n isotherm_dict = self._isotherm_dict\n display_name = '{} ({})'.format(isotherm_dict['articleSource'], isotherm_dict['DOI'])\n return Isotherm(name=display_name, json=isotherm_dict, figure_image=self._figure_image)\n\n def on_click_download(self):\n \"\"\"Download JSON file.\"\"\"\n return StringIO(self.isotherm.json_str)\n\n def on_click_add(self, event): # pylint: disable=unused-argument\n \"\"\"Add isotherm to submission.\"\"\"\n self.submissions.append(self.isotherm)\n\n def on_click_set_scale(self, event): # pylint: disable=unused-argument\n \"\"\"Set pressure scale.\"\"\"\n self.row[0] = get_bokeh_plot(self._isotherm_dict, pressure_scale=self.inp_pressure_scale.value)\n\n @property\n def layout(self):\n \"\"\"Return layout.\"\"\"\n return pn.Column(\n pn.pane.HTML(\"\"\"

Isotherm plot
\"\"\"),\n self.row,\n self.inp_pressure_scale,\n pn.Row(self.btn_download, self.btn_add),\n self.submissions.layout,\n )\n", "sub_path": "digitizer/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 4325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "bokeh.plotting.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 25, "usage_type": "call"}, {"api_name": "bokeh.models", "line_number": 25, "usage_type": "name"}, {"api_name": "bokeh.models.HoverTool", "line_number": 43, "usage_type": "call"}, {"api_name": "bokeh.models", "line_number": 43, "usage_type": "name"}, {"api_name": "panel.Row", "line_number": 58, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 58, "usage_type": "call"}, {"api_name": "panel.widgets.FileDownload", "line_number": 62, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 62, "usage_type": "attribute"}, {"api_name": "panel.widgets.Button", "line_number": 66, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 66, "usage_type": "attribute"}, {"api_name": "panel.widgets.RadioButtonGroup", "line_number": 69, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 69, "usage_type": "attribute"}, {"api_name": "submission.Submissions", "line_number": 72, "usage_type": "call"}, {"api_name": "submission.Isotherm", "line_number": 91, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 95, "usage_type": "call"}, {"api_name": "panel.Column", "line_number": 108, "usage_type": "call"}, {"api_name": "panel.pane.HTML", "line_number": 109, "usage_type": "call"}, {"api_name": "panel.pane", "line_number": 109, "usage_type": "attribute"}, {"api_name": "panel.Row", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "294062589", "text": "'''\nScript for retrieving weather data via OpenWeatherMap API\n • api_key_1 = 'f8fb4a57826b52cf7a7bf5225eaec506'\n • api_key_2 = 'a0b65bac25867383266ad37aa1373f4d'\nPre-requisites:\n • minio/mc: for AWS S3 connection. 
You will need access key and secret key.\n • pytz\n'''\n\n# city & id list\nlocation = ['Sapporo', 'Sendai', 'Niigata', 'Tokyo', 'Nagoya', 'Osaka', 'Hiroshima', 'Kochi', 'Fukuoka', 'Miyazaki', 'Naha']\nloc_id = [2128295, 2111149, 1855431, 1850147, 1856057, 1853909, 1862415, 1859146, 1863967, 1856717, 1856035]\n\nfrom datetime import datetime\nfrom pytz import timezone\nimport requests\nimport pytz\nimport json, glob, os\n\n\ndef weather_get(id):\n key = 'f8fb4a57826b52cf7a7bf5225eaec506'\n api = 'http://api.openweathermap.org/data/2.5/weather?id={}&APPID={}'\n url = api.format(id, key)\n response = requests.get(url)\n data = json.loads(response.text)\n # convert UTC time to Asia/Tokyo (human readable)\n dt_obj = datetime.fromtimestamp(int(data['dt']))\n dt_jp = pytz.utc.localize(dt_obj, is_dst=None).\\\n astimezone(pytz.timezone('Asia/Tokyo'))\n dt = dt_jp.strftime('%Y-%m-%d %H:%M:%S')\n # replace datetime with local datetime\n data['dt'] = dt\n return dt, data\n\ndef dump(city, dt, data):\n month = dt[:(dt.index('-') + 3)].replace('-', '')\n hour = dt[:dt.index(':')] + 'hour'\n try:\n with open ('{}/{}/{}.json'.format(ab_path, city, month), 'r', encoding='utf-8') as f:\n temp_json = json.load(f)\n if hour not in temp_json:\n temp_json[hour] = data\n # else: don't change anything\n with open ('{}/{}/{}.json'.format(ab_path, city, month), 'w', encoding='utf-8') as f1:\n json.dump(temp_json, f1)\n except FileNotFoundError as e:\n with open ('{}/{}/{}.json'.format(ab_path, city, month), 'w', encoding='utf-8') as f2:\n temp_json = {}\n temp_json[hour] = data\n json.dump(temp_json, f2)\n\ndef main():\n # check if city folders exist\n folders = glob.glob(ab_path+'/*/')\n names = [x.replace(ab_path, '').replace('/', '') for x in folders]\n if sorted(names) != sorted(location):\n print ('Folders don\\'t exist! Creating folders...')\n for i in location:\n os.system('mkdir {}/{}'.format(ab_path,i))\n print ('Finished creating folders. 
Now start api...\\n...')\n # start api...\n for id in loc_id:\n dt, data = weather_get(id)\n city = location[loc_id.index(id)]\n print ('Now city: ', city)\n dump(city, dt, data)\n print ('...\\nDone.')\n\nif __name__ == '__main__':\n ab_path = '/home/ubuntu/sdb/work/weather_cron'\n main()\n", "sub_path": "openweather_cron/openweather.py", "file_name": "openweather.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "pytz.utc.localize", "line_number": 29, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 30, "usage_type": "call"}, {"api_name": "json.load", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 51, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 55, "usage_type": "call"}, {"api_name": "os.system", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "538912756", "text": "from multiprocessing import Pool\nfrom os import getpid\n\ndef double(i):\n print(\"I'm process\", getpid())\n return i * 2\n\nif __name__ == '__main__':\n with Pool() as pool:\n result = pool.map(double, [1, 2, 3, 4, 5])\n print(result)\n", "sub_path": "2018/test2.py", "file_name": "test2.py", "file_ext": "py", "file_size_in_byte": 249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.getpid", "line_number": 5, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "151163811", "text": "import os\nimport pytest\nimport tempfile\nimport threading\nimport time\n\ntmp_files = []\ntmp_files_ready = threading.Event()\ntest_timeout = 10\nexpected_cron_file_content = 'The cronjob ran successfully!'\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef tmp_file(*_):\n tmp_file = tempfile.NamedTemporaryFile(prefix='platypush-test-cron-',\n suffix='.txt', delete=False)\n tmp_files.append(tmp_file.name)\n tmp_files_ready.set()\n yield tmp_file.name\n\n for f in tmp_files:\n if os.path.isfile(f):\n os.unlink(f)\n\n\ndef test_cron_execution(tmp_file):\n \"\"\"\n Test that the cronjob in ``../etc/scripts/test_cron.py`` runs successfully.\n \"\"\"\n actual_cron_file_content = None\n test_start = time.time()\n\n while actual_cron_file_content != expected_cron_file_content and \\\n time.time() - test_start < test_timeout:\n with open(tmp_file, 'r') as f:\n actual_cron_file_content = f.read()\n time.sleep(0.5)\n\n assert actual_cron_file_content == expected_cron_file_content, \\\n 'cron_test failed to run within {} seconds'.format(test_timeout)\n\n\nif __name__ == '__main__':\n pytest.main()\n\n\n# vim:sw=4:ts=4:et:\n", "sub_path": "tests/test_cron.py", "file_name": "test_cron.py", "file_ext": "py", "file_size_in_byte": 1224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "threading.Event", "line_number": 8, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 15, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.main", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "328979717", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\nimport matplotlib as mpl\nif __name__ == '__main__':\n mpl.use('Agg')\nimport sewpy as sew\nfrom astropy.io import fits\nimport astropy.table as atpy\nfrom os import getcwd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nimport os\nimport tempfile\n\n# sextractor output columns\nsexparams = ['NUMBER',\n 'X_IMAGE',\n 'Y_IMAGE',\n 'XWIN_IMAGE',\n 'YWIN_IMAGE',\n 'MAG_AUTO',\n 'FLUX_AUTO',\n 'FLUX_APER(1)']\n\n\ndef sextractor_setup(var_fname, segm_fname, bg_fname, aper=5, workdir=None):\n# sextractor config parameters\n sexconfig = {'CHECKIMAGE_TYPE': 'SEGMENTATION,BACKGROUND',\n 'CHECKIMAGE_NAME': ','.join([segm_fname,bg_fname]),\n 'CATALOG_TYPE': 'FITS_1.0',\n 'WEIGHT_TYPE': 'MAP_VAR',\n 'WEIGHT_IMAGE': var_fname,\n 'PHOT_APERTURES': '%f'%(aper)\n #'CATALOG_NAME': cat_fname\n}\n print (sexconfig)\n # set up sextractor\n sexpath = os.environ.get('SEX_PATH') or '/usr/bin/sextractor'\n sex = sew.SEW(workdir=workdir or getcwd(),\n sexpath=sexpath,\n params=sexparams,\n config=sexconfig)\n sex._clean_workdir()\n return sex\n\n\ndef get_segment(image, var, bg, segmentation, source):\n segment = segmentation == source['NUMBER']\n x = np.sum(segment, axis=0)\n y = np.sum(segment, axis=1)\n nzx = np.nonzero(x)[0]\n nzy = np.nonzero(y)[0]\n x1, x2 = nzx[0], nzx[-1]\n y1, y2 = nzy[0], nzy[-1]\n region = (slice(y1, y2+1), slice(x1, x2+1))\n cutout = image[region]-bg[region]\n # IMPORTANT I background subtract the image\n varcut = var[region]\n y_pos = source['YWIN_IMAGE'] - y1 -1 \n x_pos = source['XWIN_IMAGE'] - x1 -1 \n # we subtract 1, because Sex's positions are 1-based\n\n varcut[~segment[region]] = np.median(varcut)*100\n # set variance to high value in pixels not marked by segmentation map\n return {'cutout':cutout,'var':varcut,'x': x_pos, 'y': y_pos, 'FLUX_AUTO':source['FLUX_AUTO'], 'FLUX_APER':source['FLUX_APER']}\n\n\ndef get_cutouts(sexcat, segmentation, image, var, bg, sourcelist, figs):\n # grab cutout for each source\n sources = []\n for source in sexcat:\n #print(\"source: {}\".format(source['NUMBER']))\n\n if sourcelist is not None:\n # match to original source_list\n sep = np.hypot(source['XWIN_IMAGE']-sourcelist['X'],\n source['YWIN_IMAGE']-sourcelist['Y'])\n if np.min(sep) > 5:\n warnings.warn(\n \"This source doesn't appear to correspond to real source\")\n continue\n\n thissource = sourcelist[np.argmin(sep)]\n\n # get the cutout\n segm_info = get_segment(image, var, bg, segmentation, source)\n\n # send to dict\n if sourcelist is not None:\n segm_info['star0'] = np.isnan(thissource['n']) # is star? 
\n segm_info['flux'] = thissource['I']\n segm_info['id'] =thissource['id']\n sources.append(segm_info)\n \n # save the cutout as png files if requested\n if figs:\n plt.figure()\n plt.subplot(121)\n plt.imshow(segmentation == source['NUMBER'])\n plt.gca().invert_yaxis()\n plt.subplot(122) \n minmax = np.percentile(cutout, [5, 95])\n plt.imshow(cutout, vmin=minmax[0], vmax=minmax[1])\n plt.scatter(XY[0], XY[1], marker='+')\n plt.gca().invert_yaxis()\n plt.savefig(\"source_{}.png\".format(source['NUMBER']), dpi=200)\n plt.close()\n\n return sources\n\n\ndef run(imagepath, var_fname, sourcelistpath=None, figs=False):\n dir = tempfile.TemporaryDirectory(dir='./')\n\n bg_fname='%s/bg_temp_%d.fits'%(dir.name,os.getpid())\n segm_fname= '%s/segm_temp_%d.fits'%(dir.name,os.getpid())\n #cat_fname= './cat_temp_%d.fits'%(os.getpid())\n sexObj = sextractor_setup(var_fname, segm_fname, bg_fname, workdir=dir.name)\n # run sextractor\n R=sexObj(imagepath, returncat=False)\n cat_fname = R['catfilepath']\n\n # load the original image\n image = fits.getdata(imagepath, -1, view=np.array)\n bg = fits.getdata(bg_fname)\n var = fits.getdata(var_fname)\n if sourcelistpath is not None:\n # load the original source list\n sourcelist = fits.getdata(sourcelistpath, -1, view=np.recarray)\n else:\n sourcelist = None\n\n # load the generated segmentation map\n segmentation = fits.getdata(segm_fname, -1, view=np.array)\n\n # load the generated source catalogue\n\n sexcat = atpy.Table().read(cat_fname,format='fits')\n # get the cutouts\n sources = get_cutouts(sexcat, segmentation, image, var, bg, sourcelist, figs)\n\n # sources is a dict, the keys are the original source ids (i.e. the source\n # ids from the source list fits file produced by gen_field.py). The values\n # are tuples containing [0] a list of X and Y position inside the cutout,\n # [1] a numpy array of the cutout itself, and if a source list is given:\n # [2] is whether the object is a star.\n os.unlink(bg_fname)\n os.unlink(segm_fname)\n os.unlink(cat_fname)\n return sources\n\n\nif __name__ == \"__main__\":\n imagepath = 'image.fits'\n var_fname = 'variance.fits'\n sourcelistpath = 'source_list.fits'\n\n run(imagepath, var_fname, figs=False) # True)\n", "sub_path": "gen_cutouts.py", "file_name": "gen_cutouts.py", "file_ext": "py", "file_size_in_byte": 5485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.use", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 39, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sewpy.SEW", "line_number": 40, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.hypot", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 79, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 
98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 114, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 116, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 117, "usage_type": "call"}, {"api_name": "astropy.io.fits.getdata", "line_number": 125, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getdata", "line_number": 126, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 126, "usage_type": "name"}, {"api_name": "astropy.io.fits.getdata", "line_number": 127, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 127, "usage_type": "name"}, {"api_name": "astropy.io.fits.getdata", "line_number": 130, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.recarray", "line_number": 130, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getdata", "line_number": 135, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 135, "usage_type": "attribute"}, {"api_name": "astropy.table.Table", "line_number": 139, "usage_type": "call"}, {"api_name": "astropy.table", "line_number": 139, "usage_type": "name"}, {"api_name": "os.unlink", "line_number": 148, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 149, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "225872583", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLorenz system\n\nCreated on Thu May 21 13:23:43 2015\n\n:author: Christopher Strickland\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport scipy as sp\nfrom scipy.integrate import ode\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n##### Parameters go here #####\n\n#time points to solve at\ntpts = np.linspace(0,50,5001) #100,110,... for no transients\n#initial values\nx0 = np.array([10.,10.,10.])\n\n####################\nsigma = 10. #0.5 #10.\nbeta = 8./3. #1 #8./3.\nrho = 0.5 #1. #14. #28.\n\n####################\n\n#Observation noise\nOBS_NOISE = False #turn on or off observation noise\n#Assume noise is gaussian\nobs_mu = np.array([0,0,0]) #x,y,and z variables\nobs_sig2 = np.array([.05,.05,.05])\n\n##### ODE function #####\ndef LorenzODEs(t,x):\n dx = np.zeros(3)\n \n dx[0] = sigma*(x[1]-x[0])\n dx[1] = x[0]*(rho-x[2])-x[1]\n dx[2] = x[0]*x[1] - beta*x[2]\n \n return dx\n \n##### Solve procedure goes here #####\nXsol = []; Ysol = []; Zsol = []\nr = ode(LorenzODEs).set_integrator('dopri5',nsteps=100000,verbosity=1)\nr.set_initial_value(x0,0)\nfor t in tpts:\n if t == 0:\n Xsol.append(x0[0]);Ysol.append(x0[1]);Zsol.append(x0[2])\n continue\n r.integrate(t)\n assert(r.successful())\n if OBS_NOISE:\n Xsol.append(max(r.y[0] + sp.random.normal(obs_mu[0],obs_sig2[0]),0))\n Ysol.append(max(r.y[1] + sp.random.normal(obs_mu[1],obs_sig2[1]),0))\n Zsol.append(max(r.y[2] + sp.random.normal(obs_mu[2],obs_sig2[2]),0))\n else:\n Xsol.append(r.y[0])\n Ysol.append(r.y[1])\n Zsol.append(r.y[2])\n \n##### Plot solution #####\nfig = plt.figure()\nplt.subplot(221)\nplt.plot(tpts,Xsol)\nplt.title(r\"Plot of $X$ vs. time\")\nplt.xlabel(r\"$t$\")\nplt.ylabel(r\"$X$\")\nplt.ylim(-20,20)\nplt.subplot(222)\nplt.plot(tpts,Ysol)\nplt.title(r\"Plot of $Y$ vs. time\")\nplt.xlabel(r\"$t$\")\nplt.ylabel(r\"$Y$\")\nplt.ylim(-30,30)\nplt.subplot(223)\nplt.plot(tpts,Zsol)\nplt.title(r\"Plot of $Z$ vs. 
time\")\nplt.xlabel(r\"$t$\")\nplt.ylabel(r\"$Z$\")\nplt.ylim(0,50)\n#attractor\nax = fig.add_subplot(2,2,4,projection='3d')\nax.plot(Xsol, Ysol, Zsol)\nax.set_title(\"Attractor\")\nax.set_xlabel(r\"X\")\nax.set_ylabel(r\"Y\")\nax.set_zlabel(r\"Z\")\nplt.show()", "sub_path": "Python/Lorenz.py", "file_name": "Lorenz.py", "file_ext": "py", "file_size_in_byte": 2244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.linspace", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.integrate.ode", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.random.normal", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "scipy.random.normal", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "scipy.random.normal", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "643237698", "text": "# -*- coding:utf-8 -*-\nimport requests\nimport unittest\n\n# post请求没有body的情况\nclass WeatherChaXun(unittest.TestCase):\n\n def setUp(self):\n self.url=\"http://v.juhe.cn/weather/index\"\n self.s = requests.session()\n\n def weather_cx(self,place,keys):\n par={\"cityname\": place,\n \"dtype\": \"json\",\n \"format\": \"2\",\n \"key\": keys\n }\n r1=self.s.get(self.url,params=par)\n j1=r1.json()\n print(j1)\n res=j1['reason']\n print(res)\n return res\n\n def test01(self):\n result1=self.weather_cx('深圳',\"ac2e2a9554bbf184a4df4bb9cb10e149\")\n self.assertTrue(result1=='successed!')\n\n def test02(self):\n result2=self.weather_cx('深圳',\" \")\n self.assertTrue(result2=='错误的请求KEY')\n\n # if res=='successed!':\n # print('接口测试成功:pass')\n # else:\n # print('接口测试失败:fail')\n # for i,j in j1.items():\n # print(i,j)\n # print(j1[i])\n\n\nif __name__ == '__main__':\n unittest.main()\n\n", "sub_path": "limaopeng/port_request/body_wu.py", "file_name": "body_wu.py", "file_ext": "py", "file_size_in_byte": 1098, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "requests.session", "line_number": 10, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "292111329", "text": "import os.path\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nexec(open('version.py').read())\n\nsetup(name='tornado-project-skeleton',\n version=__version__,\n description='Tornado project skeleton',\n long_description=read('README.md'),\n author='Henning Kage',\n author_email='henning.kage@gmail.com',\n url='https://github.com/hkage/tornado-project-skeleton',\n include_package_data=True,\n classifiers=[],\n packages=find_packages(exclude=['tests']),\n install_requires=[\n 'tornado==4.3',\n ],\n setup_requires=[\n 'pytest-runner==2.6.2',\n ],\n tests_require=[\n 'pytest==2.8.2',\n 'pytest-pep8==1.0.6',\n 'pytest-cov==2.2.0',\n ])\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 6, "usage_type": 
"name"}, {"api_name": "os.path.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "17376696", "text": "from django.views.generic import TemplateView\n\nfrom mapstory.models import get_sponsors\n\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n def get_context_data(self, **kwargs):\n ctx = super(IndexView, self).get_context_data(**kwargs)\n ctx['sponsors'] = get_sponsors()\n return ctx\n", "sub_path": "mapstory/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 6, "usage_type": "name"}, {"api_name": "mapstory.models.get_sponsors", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "319005051", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport config\nimport math\nimport datetime\nimport numpy as np\nfrom geopy.distance import vincenty\nfrom models import Department, Location, Course, Section, Session\n\ndef score_free_days(matrix, preferred_num_free_days):\n num_free_days = 0\n\n for row in matrix:\n courses = np.nonzero(row)\n if len(courses) == 0:\n num_free_days += 1\n\n score = 1 - (abs(num_free_days - preferred_num_free_days) / float(4))\n if score < 0:\n score = 0\n\n return score\n\ndef average_time_delta(times, since, ignoreAfter):\n deltatimes = [datetime.timedelta(0, 0, 0, 0, time.minute, time.hour) for time in times]\n deltatimeSince = datetime.timedelta(0, 0, 0, 0, since.minute, since.hour)\n if len(deltatimes) == 0:\n return None\n\n deltas = [deltatime - deltatimeSince for deltatime in deltatimes]\n if ignoreAfter:\n correctDeltas = [datetime.timedelta(0) if delta.total_seconds() > 0 else -delta for delta in deltas]\n else:\n correctDeltas = [datetime.timedelta(0) if delta.total_seconds() < 0 else delta for delta in deltas]\n\n return (reduce(lambda x, y: x + y, correctDeltas) / len(correctDeltas))\n\ndef score_time_boundaries(schedule, preferred_start_time, preferred_end_time):\n start_times = []\n end_times = []\n\n for i in range(0, len(schedule)):\n col = schedule[i]\n stripped_col = col[np.nonzero(col)]\n\n if len(stripped_col) == 0:\n continue\n\n start_times.append(stripped_col[0].start_time)\n end_times.append(stripped_col[len(stripped_col) - 1].end_time)\n\n start_time_delta = average_time_delta(start_times, preferred_start_time, True)\n end_time_delta = average_time_delta(end_times, preferred_end_time, False)\n\n delta_sum = (start_time_delta + end_time_delta)\n\n score = 1 - (abs(delta_sum.total_seconds()) / float(18000))\n if score < 0:\n score = 0\n\n return score\n\ndef score_physical_distances(schedule):\n scores = []\n\n for i in range(0, len(schedule)):\n col = schedule[i]\n sessions = col[np.nonzero(col)]\n uniqueSessions = []\n\n for session in sessions:\n if not session in uniqueSessions:\n uniqueSessions.append(session)\n\n for j in range(0, len(uniqueSessions)):\n if j + 1 == len(uniqueSessions):\n break\n\n s1 = uniqueSessions[j]\n s2 = uniqueSessions[j + 1]\n\n c1 = s1.location.coordinatesTuple()\n c2 = s2.location.coordinatesTuple()\n\n # Vincenty distance between two coordinates\n cd = vincenty(c1, c2).meters\n\n t1 = s1.end_time\n t2 = s2.start_time\n\n td = datetime.timedelta(0, 0, 0, 0, t2.minute, t2.hour) - datetime.timedelta(0, 0, 0, 
0, t1.minute, t1.hour)\n\n # 50 m/min is a reasonable walking speed\n reasonable_distance = 50 * (td.total_seconds() / 60)\n\n reasonability_score = cd - reasonable_distance\n\n scores.append(reasonability_score)\n\n # print(\"%d meters distance and %d minute time distance between %s and %s, reasonability score: %d\" % (cd, td.total_seconds() / 60, s1.section.course.name, s2.section.course.name, reasonability_score))\n\n if len(scores) == 0:\n return 1\n\n max_deviation = max(scores)\n score = 1 - (max_deviation / float(1000))\n if score < 0:\n score = 0\n\n if score > 1:\n score = 1\n\n return score\n\n\ndef score(matrix, preferred_start_time, preferred_end_time, preferred_num_free_days):\n days_of_week_score = score_free_days(matrix, preferred_num_free_days)\n time_boundaries_score = score_time_boundaries(matrix, preferred_start_time, preferred_end_time)\n distance_score = score_physical_distances(matrix)\n\n final_score = days_of_week_score + time_boundaries_score + distance_score\n return final_score\n", "sub_path": "app/score.py", "file_name": "score.py", "file_ext": "py", "file_size_in_byte": 3908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.nonzero", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 69, "usage_type": "call"}, {"api_name": "geopy.distance.vincenty", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "112665155", "text": "#!/usr/bin/python\n\nimport subprocess\nimport sys\nimport argparse\nimport shutil\nimport os\n\n\nsrcPath = os.path.dirname(os.path.realpath(__file__))\nbuildPath = srcPath + '/build'\n\ntargetOptions = [\n \"linux\",\n \"win32\"\n]\n\nparser = argparse.ArgumentParser(description='AlgoVi build script')\nparser.add_argument('-c', '--clean_build', action='store_const', const=True, default=False, help='Make clean build')\nparser.add_argument('-d', '--debug', action='store_const', const=True, default=False, help='Debug mode build')\nparser.add_argument('-t', '--target', default=\"linux\", choices=targetOptions, help=\"operation system target = { linux, win }\")\nparser.add_argument('makeargs', nargs=\"?\", default=\"install\", help=\"make arguments\")\n\nargs = parser.parse_args()\n\nbuildType = 'Debug' if args.debug else 'Release'\nbuildPath = buildPath + '/' + args.target + '-' + buildType\ninstallPath = srcPath+'/algoviApp/'+args.target+'-'+buildType+'/'\n\nif args.clean_build and os.path.exists(buildPath):\n shutil.rmtree(buildPath, True)\n\ncmakeArgs = [\n 'cmake',\n# '-DBoost_USE_STATIC_LIBS=ON',\n srcPath,\n '-DCMAKE_BUILD_TYPE=' + buildType,\n '-DCMAKE_INSTALL_PREFIX=' + installPath,\n '-DALGOVI_TARGET_NAME=' + args.target,\n]\n\nif args.target == \"win32\":\n cmakeArgs.append('-G')\n cmakeArgs.append('MinGW Makefiles')\n\nmakeArgs = []\nif args.target == 'linux':\n makeArgs.append('make')\nelse:\n makeArgs.append('mingw32-make.exe')\nmakeArgs.append(args.makeargs)\n\n\ntry:\n os.makedirs(buildPath)\n os.makedirs(installPath)\nexcept:\n pass\n\nif 
subprocess.call(cmakeArgs, cwd=buildPath) != 0:\n print(\"Failed to run cmake: \" + repr(cmakeArgs))\n sys.exit(1)\n\nif subprocess.call(makeArgs, cwd=buildPath) != 0:\n print(\"Failed to run make: \" + repr(makeArgs))\n sys.exit(1)\n", "sub_path": "build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 1787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 10, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 31, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 55, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 56, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "110737918", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Tracker(object):\r\n def __init__(self, ax, CT):\r\n self.ax = ax\r\n ax.set_title('CT Visualization')\r\n self.CT = CT\r\n self.slices = CT.shape[0]\r\n self.ind = self.slices//2\r\n self.im = ax.imshow(self.CT[self.ind, :, :], cmap='gray')\r\n self.update() #upon instatiation, draw the image at self.ind on the axes' canvas\r\n\r\n def scroll(self, event):\r\n if event.button == 'up':\r\n self.ind = (self.ind + 1) % self.slices #modulus, so when we reach the end of the patient, we start over\r\n else:\r\n self.ind = (self.ind - 1) % self.slices\r\n self.update()\r\n\r\n def update(self):\r\n self.im.set_data(self.CT[self.ind, :, :]) #update image data\r\n self.ax.set_ylabel('Slice {}'.format(self.ind)) #update slice label\r\n self.im.axes.figure.canvas.draw() #draw new data on axes\r\n \r\ndef show_CT(ct_file):\r\n ct = np.load(ct_file)\r\n fig, axs = plt.subplots()\r\n tracker = Tracker(axs, ct)\r\n fig.canvas.mpl_connect('scroll_event', tracker.scroll)\r\n plt.show()\r\n \r\nif __name__ == '__main__':\r\n show_CT('ct.npy')", "sub_path": "ct_viz.py", "file_name": "ct_viz.py", "file_ext": "py", "file_size_in_byte": 1192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "370281562", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 1 12:59:15 2017\r\n\r\n@author: Huntrer\r\n\"\"\"\r\n\r\nimport math as m\r\nimport numpy as np\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nx = [3.2,6,7.8,1,3,2.5,100]\r\nless_than_four = list(filter(lambda x: x < 4,x))\r\n\r\n\r\nless_than_four = [3.2, 1, 3, 2.5]\r\nplusfive = list(map(lambda x: x+5, 
less_than_four))\r\nprint(plusfive)\r\n\r\n\r\nx = [3.2,6,7.8,1,3,2.5,100]\r\nfor x in range (2,5):\r\n x = -1\r\nprint(x)\r\n\r\n\r\n\"part 3\"\r\n\r\nnp.mean(x)\r\nprint(np.mean(x))\r\nsqv=x-(np.mean(x))\r\nprint(sqv)\r\n\r\n'------------------question2-----'\r\nx=[1.,5.5,7.2,4.2,-2.7,-5.4,8.9]\r\ny=[0.1,1.5,1.8,-4.2,2.7,-9.4,-1.9]\r\n\r\n\r\n# get the mean of `X` (add all the vals in `X` and divide by\r\n# the length\r\nx_mean = float(sum(X)) / len(X)\r\nprint(x_mean)\r\n# mean for `Y`\r\ny_mean = float(sum(Y)) / len(Y)\r\nprint(y_mean)\r\n\r\ncov = 0\r\n\r\n\r\nfor y_idx,y in enumerate(Y):\r\n for x_idx,x in enumerate(X):\r\n\r\n \r\n cov += (x - x_mean) * (y - y_mean) \r\n\r\nprint (cov) \r\n\r\n'--------------questions3----------'\r\nxv = np.linspace(-4, 4,)\r\nyv = np.zeros(len(xv))\r\n\r\nfor i,x in enumerate(xv):\r\n if x < 0:\r\n yv[i] = np.abs(x)\r\n yv[i] = np.log(x)\r\n \r\n elif (x >= 0) & (x < 2):\r\n yv[i] = -x\r\n \r\n elif (x >= 0) & (x < 2):\r\n yv[i] =(x^2)/(3-x)\r\n\r\nfig, ax = plt.subplots()\r\nax.plot(xv, yv, 'b.')\r\n# For x/y axes in red\r\nax.axvline(x=0, color = 'r')\r\nax.axhline(y=0, color = 'r')\r\n# For the dotted vertical lines at the jump points\r\nax.plot((0, 0), (-1,0), 'k:')\r\nax.plot((1, 1), (-1,1), 'k:')\r\nax.plot((2, 2), (-1,1), 'k:')\r\nax.set_title('Composite discontinuous function')\r\nax.set_xlabel('x')\r\nax.set_ylabel('f(x)')\r\nplt.show()\r\n'------------------question4-----'\r\nA = np.array([[1.2, 3.4, 10.3],[2, 8, 78], [35, -36, 8]])\r\nprint(A)\r\nnp.fill_diagonal(A, -5)\r\nprint(A)\r\n'--------part-b------------'\r\nA = np.array([[1.2, 3.4, 10.3],[2, 8, 78], [35, -36, 8]])\r\nprint(A) \r\nA[1:,0] = 100\r\nA[2:,1] = 100\r\nA[0,1:] = 100\r\nA[1,2:] = 100\r\nprint(\"A = \", A) \r\n'------------------question5-----'\r\npersonlist = [['Julie', 'married', 35000, 'Jack'],['Angie', 'not married', 55000, 'na',],['Sarah', 'married', 45000, 'Jim'],['Jack', 'married', 35000, 'Julie'],['John', 'not married', 25000, 'na'],['Jim', 'married', 35000, 'Sarah']]\r\nmarriedlist = [[]]", "sub_path": "midterm1.py", "file_name": "midterm1.py", "file_ext": "py", "file_size_in_byte": 2293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.mean", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "200142117", "text": "from typing import Callable, Dict, List, Tuple\n\nfrom summer.legacy.constants import FlowAdjustment\nfrom summer.legacy.stratification import Stratification\n\nfrom .flow import BaseFlow\n\n\nclass BaseTransitionFlow(BaseFlow):\n 
\"\"\"\n A flow where people move from the source compartment, to the destination.\n Eg. infection, recovery, progress of disease.\n \"\"\"\n\n def update_compartment_indices(self, mapping: Dict[str, float]):\n \"\"\"\n Update index which maps flow compartments to compartment value array.\n \"\"\"\n self.source.idx = mapping[self.source]\n self.dest.idx = mapping[self.dest]\n\n def stratify(self, strat: Stratification) -> List[BaseFlow]:\n \"\"\"\n Returns a list of new, stratified flows to replace the current flow.\n \"\"\"\n is_source_strat = self.source.has_name_in_list(strat.compartments)\n is_dest_strat = self.dest.has_name_in_list(strat.compartments)\n if not (is_dest_strat or is_source_strat):\n # Flow is not stratified, do not stratify this flow.\n return [self]\n\n new_flows = []\n for stratum in strat.strata:\n # Find new compartments\n if is_source_strat:\n new_source = self.source.stratify(strat.name, stratum)\n else:\n new_source = self.source\n\n if is_dest_strat:\n new_dest = self.dest.stratify(strat.name, stratum)\n else:\n new_dest = self.dest\n\n # Find flow adjustments to apply to the new stratified flows.\n # First, we try to find an adjustment for the source compartment.\n # This is for when the source has the required stratifications and the destination does not.\n # For example - people recovering from I -> R with multiple I strata, all with different recovery rates.\n adjustment = strat.get_flow_adjustment(self.source, stratum, self.param_name)\n if not adjustment:\n # Otherwise, try find an adjustment for the destination compartment.\n # This is for when the destination has the required stratifications and the source does not.\n # For example - people recovering from I -> R with multiple R strata, with different recovery proportions.\n adjustment = strat.get_flow_adjustment(self.dest, stratum, self.param_name)\n\n # Should we apply an adjustment to conserve the number of people?\n should_apply_conservation_split = (\n (not strat.is_strain())\n and (not adjustment)\n and (is_dest_strat and not is_source_strat)\n )\n if should_apply_conservation_split:\n # If the source is stratified but not the destination, then we need to account\n # for the resulting fan-out of flows by reducing the flow rate.\n # We don't do this for strains because this effect is already\n # captured by the infecitousness multiplier.\n num_strata = len(strat.strata)\n entry_fraction = 1.0 / num_strata\n adjustment = (FlowAdjustment.MULTIPLY, entry_fraction)\n\n if adjustment:\n new_adjustments = [*self.adjustments, adjustment]\n else:\n new_adjustments = self.adjustments\n\n new_flow = self.copy(\n source=new_source,\n dest=new_dest,\n param_name=self.param_name,\n param_func=self.param_func,\n adjustments=new_adjustments,\n )\n new_flows.append(new_flow)\n\n return new_flows\n", "sub_path": "summer/legacy/flow/base/transition.py", "file_name": "transition.py", "file_ext": "py", "file_size_in_byte": 3683, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flow.BaseFlow", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 15, "usage_type": "name"}, {"api_name": "summer.legacy.stratification.Stratification", "line_number": 22, "usage_type": "name"}, {"api_name": "summer.legacy.constants.FlowAdjustment.MULTIPLY", "line_number": 69, "usage_type": "attribute"}, {"api_name": "summer.legacy.constants.FlowAdjustment", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.List", 
"line_number": 22, "usage_type": "name"}, {"api_name": "flow.BaseFlow", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "388441173", "text": "import requests, sys, urllib3, json, os\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport time\n\nSTART_URL = 'https://bmm.crick.ac.uk/~svc-bmm-swarmdock/submit.cgi'\nEMAIL = 'ser499webscraper@gmail.com'\n\nif len(sys.argv) != 3:\n print('Usage: -a receptor -a ligand')\n sys.exit(1)\n\n\noptions = Options()\noptions.add_argument(\"--headless\")\nbrowser = webdriver.Firefox(firefox_options=options)\nbrowser.implicitly_wait(30)\n\nbrowser.get(START_URL)\n\nreceptor = sys.argv[1]\nif ':' in receptor:\n receptor = receptor.split(':')[0]\n\nligand = sys.argv[2]\nif ':' in ligand:\n ligand = ligand.split(':')[0]\n\n# upload receptor file\nrec_file = browser.find_element_by_name('receptorfile')\nrec_file.send_keys(os.getcwd() + '/Swarm Dock/pdb/{}.pdb'.format(receptor.upper()))\n\n# upload ligand file\nlig_file = browser.find_element_by_name('ligandfile')\nlig_file.send_keys(os.getcwd() + '/Swarm Dock/pdb/{}.pdb'.format(ligand.upper()))\n\n# set email\nemail_input = browser.find_element_by_name('email')\nemail_input.send_keys(EMAIL)\n\n# set job name\njob_name_input = browser.find_element_by_name('jobname')\njob_name_input.send_keys(sys.argv[1] + '_' + sys.argv[2])\n\n# submit job\nbrowser.find_element_by_name('formSubmitBut').click()\n\ntime.sleep(10)\n\n# get job ID\ncontent_div = browser.find_element_by_id('content')\n\nprint('content {}'.format(content_div.text))\n\nbrowser.quit()\n", "sub_path": "venv/bin/Swarm Dock/swarm_dock_form_submit_crawler.py", "file_name": "swarm_dock_form_submit_crawler.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.firefox.options.Options", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 31, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "538747484", "text": "import requests\nimport traceback\nimport zlib\nimport pandas as pd\nimport numpy as np\nimport sqlite3\n\nconn = sqlite3.connect(\"yingye.db\")\n\n\nurl = 'http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?type=SR&sty=YJBB&fd=%s&st=13&sr=-1&p=1&ps=10000&js=(x)&stat=0&rt=50280874'\nbaseUrl = \"http://hq2fls.eastmoney.com/EM_Quote2010PictureApplication/Flash.aspx?Type=CHD&ID=*******&lastnum=300&r=0.6714464421384037\"\n\ndef cal(item):\n code = item[0]\n name = item[1]\n day = item[16]\n yinYeE = item[4]\n yinYeTongBi = item[5]\n yinYeHuanBi = item[6]\n liRun = item[7]\n liRunTongBi = item[8]\n liRunHuanBi = item[9]\n intDay = int(day.replace('-',''))\n if code.startswith('6'):\n tmpUrl = baseUrl.replace('*******', code + '1')\n else:\n tmpUrl = baseUrl.replace('*******', code + '2')\n\n retry = 0\n while 
True:\n retry += 1\n if retry > 10:\n break\n try:\n r = requests.get(tmpUrl, timeout=5)\n content = zlib.decompress(r.content).decode(\"utf-8\")\n last_close = 1.0\n num = 0\n incs = []\n for line in content.splitlines():\n rows = line.split(',')\n date_str = rows[0]\n open_price = float(rows[1])\n close_price = float(rows[2])\n high_price = float(rows[3])\n low_price = float(rows[4])\n vol = int(rows[5])\n amount = int(rows[6])\n inc = (close_price - last_close) / last_close * 100\n\n intDate = int(date_str.replace(\"-\", ''))\n if intDay <= intDate and num < 1:\n num += 1\n incs.append(inc)\n\n last_close = close_price\n print(code, day, yinYeE, yinYeTongBi, yinYeHuanBi, liRun, liRunTongBi, liRunHuanBi, sum(incs))\n if len(incs) > 0 and 11 > sum(incs) > -11 :\n with open(\"yeji.txt\", \"a\", encoding='utf8', newline='') as f:\n f.write('%s,%s,%s,%s,%s,%s,%s,%s,%s\\r\\n' % (code, day, yinYeE, yinYeTongBi, yinYeHuanBi, liRun, liRunTongBi, liRunHuanBi, sum(incs)))\n break\n except Exception as error:\n traceback.print_exc()\n\n\ndatas = {}\n\ndef main():\n try:\n conn.execute(\n \"CREATE TABLE yye (day VARCHAR(11), code VARCHAR(6), openDay VARCHAR(11), amount FLOAT, profit FLOAT)\")\n conn.execute(\n \"CREATE INDEX day_index ON yye (day)\")\n conn.execute(\n \"CREATE INDEX code_index ON yye (code)\")\n conn.execute(\n \"CREATE UNIQUE INDEX IF NOT EXISTS DayCodeIndex ON yye (day, code)\")\n conn.commit()\n except:\n print(\"table already exist\")\n\n days = []\n for day in range(2017, 2004, -1):\n days.append(str(day) + '-12-31')\n days.append(str(day) + '-09-30')\n days.append(str(day) + '-06-30')\n days.append(str(day) + '-03-31')\n\n for day in days:\n tmpUrl = url % (day,)\n retry = 0\n while True:\n retry += 1\n if retry > 10:\n break\n try:\n r = requests.get(tmpUrl, timeout=10)\n items = r.text.replace('\",\"', '#').replace('\"','').split('#')\n if len(items) > 1:\n for item in items:\n rows = item.split(\",\")\n code = rows[0]\n openDay = rows[16]\n yinYeE = float(rows[4])\n profit = float(rows[7])\n if yinYeE < 1:\n continue\n\n if code.startswith(\"9\"):\n continue\n\n print(day, code, openDay, yinYeE, profit)\n\n try:\n conn.execute(\n \"INSERT INTO yye (day, code, openDay, amount, profit) VALUES (?, ?, ?, ?, ?)\",\n (day, code, openDay, yinYeE, profit))\n\n except Exception as error:\n traceback.print_exc()\n\n break\n except Exception as error:\n traceback.print_exc()\n\n conn.commit()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "sql/yingye_prices.py", "file_name": "yingye_prices.py", "file_ext": "py", "file_size_in_byte": 4311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 37, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 64, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 98, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 121, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "122031683", "text": "#!/usr/bin/env python\n\"\"\"\nGuide users through installing Glue's dependencies\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nfrom collections import OrderedDict\n\n# Unfortunately, we can't rely on setuptools' install_requires\n# 
keyword, because matplotlib doesn't properly install its dependencies\nfrom subprocess import check_call, CalledProcessError\nimport sys\nimport importlib\n\n\nclass Dependency(object):\n\n def __init__(self, module, info, package=None, min_version=None):\n self.module = module\n self.info = info\n self.package = package or module\n self.min_version = min_version\n self.failed = False\n\n @property\n def installed(self):\n try:\n importlib.import_module(self.module)\n return True\n except ImportError:\n return False\n\n @property\n def version(self):\n try:\n module = __import__(self.module)\n return module.__version__\n except ImportError:\n return 'unknown version'\n except AttributeError:\n try:\n return module.__VERSION__\n except AttributeError:\n return 'unknown version'\n\n\n\n def install(self):\n if self.installed:\n return\n\n print(\"-> Installing {0} with pip\".format(self.module))\n\n try:\n check_call(['pip', 'install', self.package])\n except CalledProcessError:\n self.failed = True\n\n def help(self):\n result = \"\"\"\n{module}:\n******************\n\n{info}\n\nPIP package name:\n{package}\n\"\"\".format(module=self.module, info=self.info, package=self.package)\n return result\n\n def __str__(self):\n if self.installed:\n status = 'INSTALLED (%s)' % self.version\n elif self.failed:\n status = 'FAILED (%s)' % self.info\n else:\n status = 'MISSING (%s)' % self.info\n return \"%20s:\\t%s\" % (self.module, status)\n\n\nclass QtDependency(Dependency):\n\n def install(self):\n print(\"-> Cannot install {0} automatically - skipping\".format(self.module))\n\n def __str__(self):\n if self.installed:\n status = 'INSTALLED (%s)' % self.version\n else:\n status = 'NOT INSTALLED'\n return \"%20s:\\t%s\" % (self.module, status)\n\n\nclass PyQt4(QtDependency):\n\n @property\n def version(self):\n try:\n from PyQt4 import Qt\n return \"PyQt: {0} - Qt: {1}\".format(Qt.PYQT_VERSION_STR, Qt.QT_VERSION_STR)\n except (ImportError, AttributeError):\n return 'unknown version'\n\n\nclass PyQt5(QtDependency):\n\n @property\n def version(self):\n try:\n from PyQt5 import Qt\n return \"PyQt: {0} - Qt: {1}\".format(Qt.PYQT_VERSION_STR, Qt.QT_VERSION_STR)\n except (ImportError, AttributeError):\n return 'unknown version'\n\n\nclass PySide(QtDependency):\n\n @property\n def version(self):\n try:\n import PySide\n from PySide import QtCore\n return \"PySide: {0} - Qt: {1}\".format(PySide.__version__, QtCore.__version__)\n except (ImportError, AttributeError):\n return 'unknown version'\n\n\n# Add any dependencies here\n# Make sure to add new categories to the categories tuple\ngui_framework = (\n PyQt4('PyQt4', ''),\n PyQt5('PyQt5', ''),\n PySide('PySide', '')\n)\n\nrequired = (\n Dependency('qtpy', 'Required', min_version='1.1'),\n Dependency('setuptools', 'Required', min_version='1.0'),\n Dependency('numpy', 'Required', min_version='1.9'),\n Dependency('matplotlib', 'Required for plotting', min_version='1.4'),\n Dependency('pandas', 'Adds support for Excel files and DataFrames', min_version='0.14'),\n Dependency('astropy', 'Used for FITS I/O, table reading, and WCS Parsing', min_version='1.3'),\n Dependency('dill', 'Used when saving Glue sessions', min_version='0.2'),\n Dependency('h5py', 'Used to support HDF5 files', min_version='2.4'),\n Dependency('xlrd', 'Used to support Excel files', min_version='1.0'),\n Dependency('glue_vispy_viewers', '3D viewers for glue', 'glue-vispy-viewers', min_version='0.6')\n)\n\ngeneral = (\n Dependency('scipy', 'Used for some image processing calculation'),\n 
Dependency('skimage',\n 'Used to read popular image formats (jpeg, png, etc.)',\n 'scikit-image'))\n\n\nipython = (\n Dependency('IPython', 'Needed for interactive IPython terminal', min_version='4'),\n Dependency('qtconsole', 'Needed for interactive IPython terminal'),\n Dependency('ipykernel', 'Needed for interactive IPython terminal'),\n Dependency('traitlets', 'Needed for interactive IPython terminal'),\n Dependency('pygments', 'Needed for interactive IPython terminal'),\n Dependency('zmq', 'Needed for interactive IPython terminal', 'pyzmq'))\n\n\nastronomy = (\n Dependency('pyavm', 'Used to parse AVM metadata in image files', 'PyAVM'),\n Dependency('spectral_cube', 'Used to read in spectral cubes', 'spectral-cube'),\n Dependency('astrodendro', 'Used to read in and represent dendrograms', 'astrodendro'))\n\n\ntesting = (\n Dependency('mock', 'Used in test code'),\n Dependency('pytest', 'Used in test code'))\n\nexport = (\n Dependency('plotly', 'Used to explort plots to Plot.ly'),\n)\n\ncategories = (('gui framework', gui_framework),\n ('required', required),\n ('ipython terminal', ipython),\n ('general', general),\n ('astronomy', astronomy),\n ('testing', testing),\n ('export', export))\n\ndependencies = dict((d.module, d) for c in categories for d in c[1])\n\n\ndef get_status():\n s = \"\"\n for category, deps in categories:\n s += \"%21s\" % category.upper() + os.linesep\n for dep in deps:\n s += str(dep) + os.linesep\n s += os.linesep\n return s\n\n\ndef get_status_as_odict():\n status = OrderedDict()\n for category, deps in categories:\n for dep in deps:\n if dep.installed:\n status[dep.module] = dep.version\n else:\n status[dep.module] = \"Not installed\"\n return status\n\n\ndef show_status():\n print(get_status())\n\n\ndef install_all():\n for category, deps in categories:\n for dep in deps:\n dep.install()\n\n\ndef install_selected(modules):\n modules = set(m.lower() for m in modules)\n\n for category, deps in categories:\n for dep in deps:\n if dep.installed:\n continue\n if dep.module.lower() in modules or category.lower() in modules:\n dep.install()\n\n\ndef main(argv=None):\n argv = argv or sys.argv\n\n usage = \"\"\"usage:\n #install all dependencies\n %s install\n\n #show all dependencies\n %s list\n\n #install a specific dependency or category\n %s install astropy\n %s install astronomy\n\n #display information about a dependency\n %s info astropy\n\"\"\" % ('glue-deps', 'glue-deps', 'glue-deps', 'glue-deps', 'glue-deps')\n\n if len(argv) < 2 or argv[1] not in ['install', 'list', 'info']:\n sys.stderr.write(usage)\n sys.exit(1)\n\n if argv[1] == 'info':\n if len(argv) != 3:\n sys.stderr.write(usage)\n sys.stderr.write(\"Please specify a dependency\\n\")\n sys.exit(1)\n\n dep = dependencies.get(argv[2], None)\n\n if dep is None:\n sys.stderr.write(\"Unrecognized dependency: %s\\n\" % argv[2])\n sys.exit(1)\n\n print(dep.help())\n sys.exit(0)\n\n if argv[1] == 'list':\n show_status()\n sys.exit(0)\n\n # argv[1] == 'install'\n if len(argv) == 2:\n install_all()\n show_status()\n sys.exit(0)\n\n install_selected(argv[2:])\n show_status()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "glue/_deps.py", "file_name": "_deps.py", "file_ext": "py", "file_size_in_byte": 7797, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "importlib.import_module", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 57, "usage_type": "call"}, {"api_name": 
"subprocess.CalledProcessError", "line_number": 58, "usage_type": "name"}, {"api_name": "PyQt4.Qt.PYQT_VERSION_STR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PyQt4.Qt", "line_number": 102, "usage_type": "name"}, {"api_name": "PyQt4.Qt.QT_VERSION_STR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PyQt5.Qt.PYQT_VERSION_STR", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PyQt5.Qt", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt5.Qt.QT_VERSION_STR", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PySide.__version__", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PySide.QtCore.__version__", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PySide.QtCore", "line_number": 125, "usage_type": "name"}, {"api_name": "{'Qt': 'PyQt4.Qt'}", "line_number": 133, "usage_type": "call"}, {"api_name": "{'Qt': 'PyQt5.Qt'}", "line_number": 134, "usage_type": "call"}, {"api_name": "{'PySide': 'PySide', 'QtCore': 'PySide.QtCore'}", "line_number": 135, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.linesep", "line_number": 198, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 203, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 235, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 253, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 253, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 254, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 258, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 258, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 259, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 259, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 260, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 265, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 265, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 266, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 269, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 273, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 279, "usage_type": "call"}]} +{"seq_id": "577848300", "text": "import aiohttp\nimport discord\nimport json\nimport logging\nimport mimetypes\nimport random\nimport re\n\nfrom discord.ext import commands\nfrom random import choice\nfrom .utils.gets import getWithoutInvoke\nfrom .utils.helper import edit, embedColor\n\nlog = logging.getLogger('LOG')\n\n\nclass Misc:\n def __init__(self, bot):\n self.bot = bot\n self.ball = [\"As I see it, yes\", \"It is certain\", \"It is decidedly so\", \"Most likely\", \"Outlook good\",\n \"Signs point to yes\", \"Without a doubt\", \"Yes\", \"Yes – definitely\", \"You may rely on it\", \"Reply hazy, try again\",\n \"Ask again later\", \"Better not tell you now\", \"Cannot predict now\", \"Concentrate and ask again\",\n \"Don't count on it\", \"My reply is no\", \"My sources say no\", \"Outlook not so good\", \"Very doubtful\"]\n self.regionals = {'A': '\\N{REGIONAL INDICATOR SYMBOL LETTER A}', 'B': '\\N{REGIONAL INDICATOR SYMBOL LETTER B}', 'C': '\\N{REGIONAL INDICATOR SYMBOL LETTER C}',\n 'D': '\\N{REGIONAL INDICATOR SYMBOL LETTER D}', 'E': '\\N{REGIONAL 
INDICATOR SYMBOL LETTER E}', 'F': '\\N{REGIONAL INDICATOR SYMBOL LETTER F}',\n 'G': '\\N{REGIONAL INDICATOR SYMBOL LETTER G}', 'H': '\\N{REGIONAL INDICATOR SYMBOL LETTER H}', 'I': '\\N{REGIONAL INDICATOR SYMBOL LETTER I}',\n 'J': '\\N{REGIONAL INDICATOR SYMBOL LETTER J}', 'K': '\\N{REGIONAL INDICATOR SYMBOL LETTER K}', 'L': '\\N{REGIONAL INDICATOR SYMBOL LETTER L}',\n 'M': '\\N{REGIONAL INDICATOR SYMBOL LETTER M}', 'N': '\\N{REGIONAL INDICATOR SYMBOL LETTER N}', 'O': '\\N{REGIONAL INDICATOR SYMBOL LETTER O}',\n 'P': '\\N{REGIONAL INDICATOR SYMBOL LETTER P}', 'Q': '\\N{REGIONAL INDICATOR SYMBOL LETTER Q}', 'R': '\\N{REGIONAL INDICATOR SYMBOL LETTER R}',\n 'S': '\\N{REGIONAL INDICATOR SYMBOL LETTER S}', 'T': '\\N{REGIONAL INDICATOR SYMBOL LETTER T}', 'U': '\\N{REGIONAL INDICATOR SYMBOL LETTER U}',\n 'V': '\\N{REGIONAL INDICATOR SYMBOL LETTER V}', 'W': '\\N{REGIONAL INDICATOR SYMBOL LETTER W}', 'X': '\\N{REGIONAL INDICATOR SYMBOL LETTER X}',\n 'Y': '\\N{REGIONAL INDICATOR SYMBOL LETTER Y}', 'Z': '\\N{REGIONAL INDICATOR SYMBOL LETTER Z}'}\n self.numbers = {'0': '0⃣', '1': '1⃣', '2': '2⃣', '3': '3⃣', '4': '4⃣', '5': '5⃣', '6': '6⃣', '7': '7⃣', '8': '8⃣', '9': '9⃣'}\n self.emoji_reg = re.compile(r'<:.+?:([0-9]{15,21})>')\n self.link = re.compile(r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n # Sends a googleitfor.me link with the specified tags\n @commands.command(aliases=[\"L2g\"])\n async def l2g(self, ctx, *, msg: str):\n \"\"\"Links to lmgtfy.\"\"\"\n lmgtfy = 'http://googleitfor.me/?q='\n words = msg.lower().strip().split(' ')\n for word in words:\n lmgtfy += word + '+'\n await edit(ctx, content=lmgtfy[:-1])\n\n # Picks a random answer from a list of options\n @commands.command(aliases=[\"Choose\"])\n async def choose(self, ctx, *, choices: str):\n \"\"\"Chooses one from many possibilities.\"\"\"\n choiceslist = choices.split(\"|\")\n choice = random.choice(choiceslist)\n if len(choiceslist) < 2:\n await edit(ctx, content=\"2+ Options, separated with ``|``\", ttl=5)\n else:\n em = discord.Embed(colour=embedColor(self))\n em.add_field(name=\"Options\", value=choices, inline=False)\n em.add_field(name=\"Choice\", value=\"<:clyde:273922151856209923> | My Answer is ``{}``\".format(choice))\n await edit(ctx, embed=em)\n\n # 8ball\n @commands.command(name=\"8\", aliases=[\"8ball\"])\n async def _8ball(self, ctx, *, question: str):\n \"\"\"Typical 8ball like you know it.\"\"\"\n if question.endswith(\"?\") and question != \"?\":\n await edit(ctx, content=\"`\" + choice(self.ball) + \"`\")\n else:\n await edit(ctx, content=\"That doesn't look like a question.\", ttl=3)\n\n # Urbandictionary\n @commands.command(aliases=[\"Urban\"])\n async def urban(self, ctx, *, search_terms: str, definition_number: int=1):\n \"\"\"Get an Urban Dictionary entry.\"\"\"\n search_terms = search_terms.split(\" \")\n try:\n if len(search_terms) > 1:\n pos = int(search_terms[-1]) - 1\n search_terms = search_terms[:-1]\n else:\n pos = 0\n if pos not in range(0, 11):\n pos = 0\n except ValueError:\n pos = 0\n search_terms = \"+\".join(search_terms)\n url = \"http://api.urbandictionary.com/v0/define?term=\" + search_terms\n try:\n async with aiohttp.ClientSession() as cs:\n async with cs.get(url) as r:\n result = json.loads(await r.text())\n if result[\"list\"]:\n definition = result['list'][pos]['definition']\n example = 
result['list'][pos]['example']\n defs = len(result['list'])\n embed = discord.Embed(title='Definition #{} out of {}'.format(pos + 1, defs), description=definition, colour=embedColor(self))\n embed.set_author(name=search_terms, icon_url='https://i.imgur.com/bLf4CYz.png')\n embed.add_field(name=\"Example:\", value=example, inline=False)\n await edit(ctx, embed=embed)\n else:\n await edit(ctx, content=\"Your search terms gave no results.\", ttl=3)\n except IndexError:\n await edit(ctx, content=\"There is no definition #{}\".format(pos + 1), ttl=3)\n except:\n await edit(ctx, content=\"Error.\", ttl=3)\n\n @commands.command(aliases=[\"Gif\"])\n async def gif(self, ctx, *text):\n \"\"\"Get a gif from Giphy.\"\"\"\n if text:\n if len(text[0]) > 1 and len(text[0]) < 20:\n try:\n msg = \"+\".join(text)\n search = \"http://api.giphy.com/v1/gifs/search?q=\" + msg + \"&api_key=dc6zaTOxFJmzC\"\n async with aiohttp.ClientSession() as cs:\n async with cs.get(search) as r:\n result = json.loads(await r.text())\n if result[\"data\"] != []:\n await edit(ctx, embed=discord.Embed(color=embedColor(self)).set_image(url=result[\"data\"][0][\"images\"][\"original\"][\"url\"]))\n else:\n await edit(ctx, content=\"Your search terms gave no results.\", ttl=3)\n except:\n await edit(ctx, content=\"Error.\", ttl=3)\n else:\n await edit(ctx, content=\"Invalid search.\", ttl=3)\n else:\n await edit(ctx, content=\"\\N{HEAVY EXCLAMATION MARK SYMBOL} Specify Search\", ttl=3)\n\n def to_reginals(self, content, react):\n emote_list = []\n for i in content.split(\" \"):\n if self.emoji_reg.findall(i):\n emote_list.append(self.bot.get_emoji(int(self.emoji_reg.findall(i)[0])))\n else:\n for x in list(i):\n if x.isalpha():\n emote_list.append(self.regionals[x.upper()])\n elif x.isdigit():\n emote_list.append(self.numbers[x])\n elif react is False:\n emote_list.append(x)\n return emote_list\n\n @commands.command(aliases=[\"React\"])\n async def react(self, ctx):\n \"\"\"React to a Message with Text.\"\"\"\n await ctx.message.delete()\n msg = getWithoutInvoke(ctx)\n split = msg.split()\n _id = None\n if len(split) > 1 and split[-1].isdigit():\n _id = int(split[-1])\n msg = msg.strip(str(_id))\n reactions = self.to_reginals(msg, True)\n limit = 25 if _id else 2\n async for message in ctx.message.channel.history(limit=limit):\n if (not _id and message.id != ctx.message.id) or (_id == message.id):\n for i in reactions:\n await message.add_reaction(i)\n\n @commands.command(aliases=[\"Regional\", \"Regionals\", \"regionals\"])\n async def regional(self, ctx, *, msg: str):\n \"\"\"Convert a Text to emotes.\"\"\"\n regional_list = self.to_reginals(msg, False)\n regional_output = []\n for i in regional_list:\n regional_output.append(\" \")\n if isinstance(i, discord.Emoji):\n regional_output.append(str(i))\n else:\n regional_output.append(i)\n await edit(ctx, content=''.join(regional_output))\n\n @commands.command(aliases=[\"Embed\"])\n async def embed(self, ctx, *, msg: str):\n \"\"\"Embed a Text.\"\"\"\n try:\n await edit(ctx, embed=discord.Embed(description=msg, colour=embedColor(self)))\n except:\n await edit(ctx, content=\"\\N{HEAVY EXCLAMATION MARK SYMBOL} Something went wrong\", ttl=5)\n\n @commands.command(aliases=[\"Eimage\", \"Ei\", \"ei\"])\n async def eimage(self, ctx, *, msg: str):\n \"\"\"Embed an image.\"\"\"\n link = self.link.findall(msg)\n if link:\n mimetype, encoding = mimetypes.guess_type(link[0])\n if mimetype and mimetype.startswith('image'):\n try:\n await edit(ctx, 
embed=discord.Embed(colour=embedColor(self)).set_image(url=link[0]))\n except:\n await edit(ctx, content=\"\\N{HEAVY EXCLAMATION MARK SYMBOL} Something went wrong\", ttl=5)\n else:\n await edit(ctx, content=\"\\N{HEAVY EXCLAMATION MARK SYMBOL} No image link\", ttl=5)\n else:\n await edit(ctx, content=\"\\N{HEAVY EXCLAMATION MARK SYMBOL} No link given\", ttl=5)\n\n\ndef setup(bot):\n bot.add_cog(Misc(bot))\n", "sub_path": "cogs/misc.py", "file_name": "misc.py", "file_ext": "py", "file_size_in_byte": 10206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "utils.helper.edit", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.helper.edit", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.helper.embedColor", "line_number": 61, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 63, "usage_type": "argument"}, {"api_name": "utils.helper.edit", "line_number": 64, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 53, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 53, "usage_type": "name"}, {"api_name": "utils.helper.edit", "line_number": 71, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 73, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 67, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 93, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 95, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.helper.embedColor", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 109, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 76, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 119, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 123, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.helper.embedColor", "line_number": 123, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 127, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 129, "usage_type": 
"call"}, {"api_name": "utils.helper.edit", "line_number": 131, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 111, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 111, "usage_type": "name"}, {"api_name": "utils.gets.getWithoutInvoke", "line_number": 152, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 148, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 148, "usage_type": "name"}, {"api_name": "discord.Emoji", "line_number": 172, "usage_type": "attribute"}, {"api_name": "utils.helper.edit", "line_number": 176, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 165, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.helper.edit", "line_number": 182, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 182, "usage_type": "call"}, {"api_name": "utils.helper.embedColor", "line_number": 182, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 184, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 178, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 178, "usage_type": "name"}, {"api_name": "mimetypes.guess_type", "line_number": 191, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 194, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 194, "usage_type": "call"}, {"api_name": "utils.helper.embedColor", "line_number": 194, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 196, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 198, "usage_type": "call"}, {"api_name": "utils.helper.edit", "line_number": 200, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 186, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "124707937", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# license removed for brevity\n# //======================================================================//\n# // This software is free: you can redistribute it and/or modify //\n# // it under the terms of the GNU General Public License Version 3, //\n# // as published by the Free Software Foundation. //\n# // This software is distributed in the hope that it will be useful, //\n# // but WITHOUT ANY WARRANTY; without even the implied warranty of //\n# // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.. See the //\n# // GNU General Public License for more details. //\n# // You should have received a copy of the GNU General Public License //\n# // Version 3 in the file COPYING that came with this distribution. 
//\n# // If not, see //\n# //======================================================================//\n# // //\n# // Copyright (c) 2019 SinfonIA Pepper RoboCup Team //\n# // Sinfonia - Colombia //\n# // https://sinfoniateam.github.io/sinfonia/index.html //\n# // //\n# //======================================================================//\n\nfrom person_cloud import PersonCloud\nfrom person_local import PersonLocal\nfrom person_cloud import Less_Blurred\nfrom edit_files import Group\nimport cv2 as cv2\nimport sys\nimport os\nimport json\n\n\n# import unicodedata\n\nclass Characterization:\n def __init__(self,source):\n self.ROOT_PATH = os.path.dirname(sys.modules['__main__'].__file__)\n print(self.ROOT_PATH)\n n_imas, percent, n_train = self.get_parameters()\n self.n_images_to_take = n_imas\n self.percent_of_face = percent\n self.n_images_to_train = n_train\n self.source = source\n self.persons = self.setPersonSource()\n print(self.n_images_to_take, self.n_images_to_train, self.percent_of_face) \n\n def setPersonSource(self):\n print(\"Use {} enviroment\".format(self.source))\n if (self.source == 'local'):\n return PersonLocal()\n if (self.source == 'cloud' ):\n return PersonCloud()\n\n def get_parameters(self):\n print (self.ROOT_PATH)\n with open(\"Resources/interaction_parameters.json\") as f:\n secretInfo = json.load(f)\n print(\"Interaction parameters: \", secretInfo)\n return secretInfo[\"n_images_to_take\"], secretInfo[\"percent_of_face\"], secretInfo[\"n_images_to_train\"]\n\n def detect_person(self, frame):\n people = self.persons.detectPerson(frame)\n return people\n\n def indentify_person(self, frame):\n people = self.persons.identifyPerson(frame)\n return people\n\n def add_person(self, name, images):\n # blurred = Less_Blurred(len(images))\n # images = blurred.sort_less_blurred(images)\n person = self.persons.enrol(name, images)\n return person\n\n def get_persons(self):\n personsList = self.persons.persons_in_group()\n for p in personsList:\n print(p)\n return personsList\n\n def delete_person(self, name):\n self.persons.delete_person_by_name(name)\n \n def delete_all_person(self):\n pass\n #delete all person group\n\n def get_persons_attributes(self):\n G = Group()\n for p in G.persons:\n print(p)\n return G.persons\n \n\n\n# c = Characterization(\"local\")\n# c.get_persons()\n# c.indentify_person(True)", "sub_path": "Class/characterization.py", "file_name": "characterization.py", "file_ext": "py", "file_size_in_byte": 3719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 37, "usage_type": "attribute"}, {"api_name": "person_local.PersonLocal", "line_number": 50, "usage_type": "call"}, {"api_name": "person_cloud.PersonCloud", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "edit_files.Group", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "180876637", "text": "from django.db import models\nfrom datetime import date\n\n\nclass MostValuablePlayer(models.Model):\n \"\"\"MVP Игроки\"\"\"\n nickname = models.CharField(\"Ник\", max_length=150)\n first_name = models.CharField(\"Имя\", max_length=100, default=\"NNN\")\n last_name = models.CharField(\"Фамилия\", max_length=100, default=\"NNN\")\n\n def __str__(self):\n return self.nickname\n\n class 
Meta:\n verbose_name = \"MVP player\"\n verbose_name_plural = \"MVP players\"\n\n\nclass Match(models.Model):\n \"\"\"Матчи\"\"\"\n teams = models.CharField(\"Команды\", max_length=300, default=\"ZZS - \")\n score = models.CharField(\"Счет\", max_length=50)\n mvp_player = models.ForeignKey(MostValuablePlayer, verbose_name=\"MVP игрок\", on_delete=models.SET_NULL, null=True)\n win_result = models.BooleanField(\"Победа\", default=True)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.teams\n\n class Meta:\n verbose_name = \"Match\"\n verbose_name_plural = \"Matches\"\n\n\nclass Team(models.Model):\n \"\"\"Команды\"\"\"\n name = models.CharField(\"Название\", max_length=150)\n description = models.TextField(\"Описание\")\n team_started = models.DateField(\"Дата основания\", default=date.today)\n matches = models.ManyToManyField(Match, verbose_name=\"Матчи\", related_name=\"teams_games\")\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.description\n\n class Meta:\n verbose_name = \"Team\"\n verbose_name_plural = \"Teams\"\n\n\nclass Event(models.Model):\n \"\"\"Турниры\"\"\"\n name = models.CharField(\"Название\", max_length=250)\n description = models.TextField(\"Описание\")\n date = models.DateField(\"Дата проведения\", default=date.today)\n prize_pool = models.IntegerField(\"Призовой фонд\", default=0, help_text=\"указывать сумму в тенге\")\n lan = models.BooleanField(\"True- LAN, False- Online\", default=False)\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"Event\"\n verbose_name_plural = \"Events\"\n\n\nclass Stat(models.Model):\n \"\"\"Статистика\"\"\"\n players_stat = models.ForeignKey(\n MostValuablePlayer, verbose_name=\"Статистика игрока\", on_delete=models.SET_NULL, null=True\n )\n kd_ratio = models.FloatField(\"КД\", default=1.00)\n head_shots = models.FloatField(\"Процент хедшотов\", default=0.00)\n maps_played = models.IntegerField(\"Сыгранно карт\", default=0)\n elo = models.PositiveSmallIntegerField(\"ОЧКОв ЭЛО\", default=1000)\n face_it_lvl = models.PositiveSmallIntegerField(\"Уровень на FACEIT\", default=3)\n\n def __str__(self):\n return f\"{self.kd_ratio}\"\n\n class Meta:\n verbose_name = \"Stat\"\n verbose_name_plural = \"Stats\"\n\n\nclass Player(models.Model):\n \"\"\"Игроки\"\"\"\n player = models.ForeignKey(MostValuablePlayer, verbose_name=\"игрок\", on_delete=models.CASCADE)\n age = models.PositiveSmallIntegerField(\"Возраст\", default=16)\n role = models.CharField(\"Роль\", max_length=150)\n photo = models.ImageField(\"Фото\", upload_to=\"player/\")\n active_period = models.CharField(\"Период активности\", max_length=150)\n country = models.CharField(\"Страна\", max_length=100)\n team = models.ManyToManyField(Team, verbose_name=\"команда\", related_name=\"team_player\")\n stat = models.ForeignKey(Stat, verbose_name=\"статистика\", on_delete=models.CASCADE)\n event = models.ManyToManyField(Event, verbose_name=\"турниры\", related_name=\"event_player\")\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return f\"{self.player}\"\n\n class Meta:\n verbose_name = \"Player\"\n verbose_name_plural = \"Players\"\n\n\nclass Highlight(models.Model):\n \"\"\"Хайлайты\"\"\"\n player = models.ForeignKey(Player, verbose_name=\"игрок\", on_delete=models.SET_NULL, null=True)\n match = models.ForeignKey(Match, verbose_name=\"матч\", on_delete=models.SET_NULL, null=True)\n description = 
models.TextField(\"Название\")\n preview = models.ImageField(\"Превью\", upload_to=\"preview/\")\n url = models.SlugField(max_length=160, unique=True)\n\n def __str__(self):\n return self.description\n\n class Meta:\n verbose_name = \"Highlight\"\n verbose_name_plural = \"Highlights\"\n\n\nclass Comments(models.Model):\n \"\"\"Коментарии\"\"\"\n name = models.CharField(\"Имя\", max_length=150)\n email = models.EmailField()\n text = models.TextField(\"Текст\")\n parent = models.ForeignKey('self', verbose_name=\"Родитель\", on_delete=models.SET_NULL, blank=True, null=True)\n highlight = models.ForeignKey(Highlight, verbose_name=\"хайлайт\", on_delete=models.CASCADE)\n\n def __str__(self):\n return f\"{self.name} - {self.highlight}\"\n\n class Meta:\n verbose_name = \"Comment\"\n verbose_name_plural = \"Comments\"", "sub_path": "zzs/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 5193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.db.models.BooleanField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 39, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 39, "usage_type": "name"}, {"api_name": 
"django.db.models.ManyToManyField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models.IntegerField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 77, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 95, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 96, "usage_type": "attribute"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 108, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 111, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 111, "usage_type": "attribute"}, {"api_name": "django.db.models.TextField", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 124, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 126, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 129, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 129, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 130, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": 
"name"}, {"api_name": "django.db.models.CASCADE", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "624322120", "text": "import os\nimport sys\nimport gzip\nimport fnmatch\nfrom datetime import datetime\nfrom termcolor import colored\nfrom singularity_request import get_json_response\n\nBASE_URI_FORMAT = '{0}{1}'\nALL_REQUESTS = '/requests'\nREQUEST_TASKS_FORMAT = '/history/request/{0}/tasks'\nACTIVE_TASKS_FORMAT = '/history/request/{0}/tasks/active'\n\ndef unpack_logs(logs):\n for zipped_file in logs:\n try:\n if os.path.isfile(zipped_file):\n file_in = gzip.open(zipped_file, 'rb')\n unzipped = zipped_file.replace('.gz', '.log')\n file_out = open(unzipped, 'wb')\n file_out.write(file_in.read())\n file_out.close()\n file_in.close\n os.remove(zipped_file)\n sys.stderr.write(colored('Unpacked ', 'green') + colored(zipped_file, 'white') + '\\n')\n except:\n if os.path.isfile(zipped_file):\n os.remove(zipped_file)\n sys.stderr.write(colored('Could not unpack {0}'.format(zipped_file), 'red') + '\\n')\n continue\n\ndef base_uri(args):\n if not args.singularity_uri_base:\n exit(\"Specify a base uri for Singularity (-u)\")\n uri_prefix = \"\" if args.singularity_uri_base.startswith((\"http://\", \"https://\")) else \"http://\"\n return BASE_URI_FORMAT.format(uri_prefix, args.singularity_uri_base)\n\ndef tasks_for_requests(args):\n all_tasks = []\n for request in all_requests(args):\n if args.requestId and args.deployId:\n tasks = [task[\"taskId\"][\"id\"] for task in all_tasks_for_request(args, request) if log_matches(task[\"taskId\"][\"deployId\"], args.deployId)]\n else:\n tasks = [task[\"taskId\"][\"id\"] for task in all_tasks_for_request(args, request)]\n tasks = tasks[0:args.task_count] if hasattr(args, 'task_count') else tasks\n all_tasks = all_tasks + tasks\n return all_tasks\n\ndef log_matches(inputString, pattern):\n return fnmatch.fnmatch(inputString, pattern) or fnmatch.fnmatch(inputString, pattern + '*.gz')\n\ndef all_tasks_for_request(args, request):\n uri = '{0}{1}'.format(base_uri(args), ACTIVE_TASKS_FORMAT.format(request))\n active_tasks = get_json_response(uri)\n if hasattr(args, 'start_days'):\n uri = '{0}{1}'.format(base_uri(args), REQUEST_TASKS_FORMAT.format(request))\n historical_tasks = get_json_response(uri)\n if len(historical_tasks) == 0:\n return active_tasks\n elif len(active_tasks) == 0:\n return historical_tasks\n else:\n return active_tasks + [h for h in historical_tasks if is_in_date_range(args, int(str(h['updatedAt'])[0:-3]))]\n else:\n return active_tasks\n\ndef all_requests(args):\n uri = '{0}{1}'.format(base_uri(args), ALL_REQUESTS)\n requests = get_json_response(uri)\n included_requests = []\n for request in requests:\n if fnmatch.fnmatch(request['request']['id'], args.requestId):\n included_requests.append(request['request']['id'])\n return included_requests\n\ndef is_in_date_range(args, timestamp):\n timedelta = datetime.utcnow() - datetime.utcfromtimestamp(timestamp)\n if args.end_days:\n return False if timedelta.days > args.start_days or timedelta.days <= args.end_days else True\n else:\n return False if timedelta.days > args.start_days else True\n", "sub_path": "scripts/logfetch/logfetch_base.py", "file_name": "logfetch_base.py", "file_ext": "py", "file_size_in_byte": 3101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.isfile", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"gzip.open", "line_number": 18, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 25, "usage_type": "attribute"}, {"api_name": "termcolor.colored", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 29, "usage_type": "attribute"}, {"api_name": "termcolor.colored", "line_number": 29, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 50, "usage_type": "call"}, {"api_name": "singularity_request.get_json_response", "line_number": 54, "usage_type": "call"}, {"api_name": "singularity_request.get_json_response", "line_number": 57, "usage_type": "call"}, {"api_name": "singularity_request.get_json_response", "line_number": 69, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "7904660", "text": "#!/usr/bin/python3\n\n'''\nDescription: Python script to query IPv4 , Domain, or URL functions through the use of apivoid.com's Threat Analysis APIs\nReference: https://www.apivoid.com/\nAuthor: Kris Rostkowski\n'''\n\n# imported modules\nimport requests\nimport json\nimport keyring\nimport re\nimport argparse\nfrom pprint import pprint\n\n\n# argparse function\n# setup multiple arguments to call different apivoid functions\ndef get_args():\n\n parser = argparse.ArgumentParser(prog=\"apivoid lookup script\",\n description=\"Python script to query IPv4 , Domain, or URL functions through the use of apivoid.com's Threat Analysis APIs \")\n parser.add_argument(\"-i\", \"--ipaddress\", help=\"Input IPv4 address to run the IP reputation function\")\n parser.add_argument(\"-d\", \"--domain\", help=\"Input domain to run the domain blacklist function\")\n parser.add_argument(\"-u\", \"--url\", help=\"Input URL to run the URL reputation function\")\n parser.add_argument(\"-sT\", \"--sitetrust\", help=\"Input domain to run the Site Trustworthiness record function\")\n parser.add_argument(\"-t\", \"--threatlog\", help=\"Input domain to run the ThreatLog function\")\n parser.add_argument(\"-sL\", \"--ssl\", help=\"Input domain to run the SSL lookup function\")\n\n args = parser.parse_args()\n\n return args\n\n\n# check for valid IP address, supply error message, kill process\ndef ip_check(ip):\n\n ip_re = re.compile(\n \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\")\n if ip_re.match(ip):\n return ip\n else:\n print(f\"\\nprocess killed, {ip} is not an ip address\")\n exit()\n\n\n# check for valid URLs, supply error message, kill process\ndef url_check(url):\n\n url_regex = re.compile(\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\")\n if url_regex.match(url):\n return url\n else:\n print(f\"\\nprocess killed, {url} is not a valid URL. 
Please add http(s):// to your URL.\")\n exit()\n\n\n# ip reputation function\ndef ip_reputation(ip_rep):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/iprep/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&ip=\" + ip_rep)\n data = json.loads(request.text)\n anonymity = (data[\"data\"][\"report\"][\"anonymity\"])\n blacklists = (data[\"data\"][\"report\"][\"blacklists\"])\n information = (data[\"data\"][\"report\"][\"information\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"report\"][\"blacklists\"][\"engines\"])\n\n print(\"\\nAnonymity Information:\")\n for key, value in anonymity.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nIP Blacklist:\")\n for key, value in blacklists.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nServer Information:\")\n for key, value in information.items():\n print(\" \", key, \":\", value)\n\n\n# domain reputation function\ndef d_reputation(d_rep):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/domainbl/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&host=\" + d_rep)\n data = json.loads(request.text)\n report = (data[\"data\"][\"report\"])\n blacklists = (data[\"data\"][\"report\"][\"blacklists\"])\n category = (data[\"data\"][\"report\"][\"category\"])\n server = (data[\"data\"][\"report\"][\"server\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"report\"][\"blacklists\"][\"engines\"])\n del (data[\"data\"][\"report\"][\"blacklists\"])\n del (data[\"data\"][\"report\"][\"category\"])\n del (data[\"data\"][\"report\"][\"server\"])\n\n print(\"\\nReport:\")\n for key, value in report.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nDomain Blacklist:\")\n for key, value in blacklists.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nCategory:\")\n for key, value in category.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nServer Information:\")\n for key, value in server.items():\n print(\" \", key, \":\", value)\n\n\n# url reputation function\ndef url_reputation(u_rep):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/urlrep/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&url=\" + u_rep)\n data = json.loads(request.text)\n dns_records = (data[\"data\"][\"report\"][\"dns_records\"][\"mx\"])\n ns_lookup = (data[\"data\"][\"report\"][\"dns_records\"][\"ns\"])\n domain_blacklist = (data[\"data\"][\"report\"][\"domain_blacklist\"])\n geo_location = (data[\"data\"][\"report\"][\"geo_location\"])\n risk_score = (data[\"data\"][\"report\"][\"risk_score\"])\n security_checks = (data[\"data\"][\"report\"][\"security_checks\"])\n site_category = (data[\"data\"][\"report\"][\"site_category\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"report\"][\"domain_blacklist\"][\"engines\"])\n\n print(\"\\nDNS Records:\")\n pprint(dns_records) # needs cleaning up\n\n print(\"\\nNS Lookup:\")\n pprint(ns_lookup) # needs cleaning up\n\n print(\"\\nDomain Blacklist:\")\n for key, value in domain_blacklist.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nGeolocation:\")\n for key, value in geo_location.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nRisk Score:\")\n for key, value in risk_score.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nSecurity Checks:\")\n for key, value in 
security_checks.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nSite Category:\")\n for key, value in site_category.items():\n print(\" \", key, \":\", value)\n\n\n# site trustworthiness function\ndef site_trust(s_trust):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/sitetrust/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&host=\" + s_trust)\n data = json.loads(request.text)\n blacklist = (data[\"data\"][\"report\"][\"domain_blacklist\"])\n trust_score = (data[\"data\"][\"report\"][\"trust_score\"])\n server_details = (data[\"data\"][\"report\"][\"server_details\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"report\"][\"domain_blacklist\"][\"engines\"])\n\n print(\"\\nBlacklist:\")\n for key, value in blacklist.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nTrust Score:\")\n for key, value in trust_score.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nServer Details:\")\n for key, value in server_details.items():\n print(\" \", key, \":\", value)\n\n\n# threat log function\ndef threat_log(t_log):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/threatlog/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&host=\" + t_log)\n data = json.loads(request.text)\n host = (data[\"data\"])\n threatlog = (data[\"data\"][\"threatlog\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"threatlog\"])\n\n print(\"\\nHost Information:\")\n for key, value in host.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nThreat Log:\")\n for key, value in threatlog.items():\n print(\" \", key, \":\", value)\n\n\n# ssl function\ndef get_ssl(s_look):\n\n token = keyring.get_password(\"apivoid\", \"username\")\n base_url = \"https://endpoint.apivoid.com/sslinfo/v1/pay-as-you-go/?key=\"\n request = requests.get(base_url + token + \"&host=\" + s_look)\n data = json.loads(request.text)\n certificate = (data[\"data\"][\"certificate\"])\n extensions = (data[\"data\"][\"certificate\"][\"details\"][\"extensions\"])\n issuer = (data[\"data\"][\"certificate\"][\"details\"][\"issuer\"])\n signature = (data[\"data\"][\"certificate\"][\"details\"][\"signature\"])\n subject = (data[\"data\"][\"certificate\"][\"details\"][\"subject\"])\n validity = (data[\"data\"][\"certificate\"][\"details\"][\"validity\"])\n\n # deleting unwanted data from dictionaries\n del (data[\"data\"][\"certificate\"][\"details\"])\n\n print(\"\\nCertificate Information:\")\n for key, value in certificate.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nExtensions:\")\n for key, value in extensions.items(): # need to adjust formatting\n print(\" \", key, \":\", value)\n\n print(\"\\nIssuer:\")\n for key, value in issuer.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nSignature Information:\")\n for key, value in signature.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nSubject:\")\n for key, value in subject.items():\n print(\" \", key, \":\", value)\n\n print(\"\\nValidity:\")\n for key, value in validity.items():\n print(\" \", key, \":\", value)\n\n# main\ndef main():\n\n args = get_args()\n ip_rep = args.ipaddress\n d_rep = args.domain\n u_rep = args.url\n s_trust = args.sitetrust\n t_log = args.threatlog\n s_look = args.ssl\n\n if args.ipaddress:\n ip_check(ip_rep)\n ip_reputation(ip_rep)\n elif args.domain:\n d_reputation(d_rep)\n elif args.url:\n url_check(u_rep)\n 
url_reputation(u_rep)\n elif args.sitetrust:\n site_trust(s_trust)\n elif args.threatlog:\n threat_log(t_log)\n elif args.ssl:\n get_ssl(s_look)\n else:\n print(\"Not a valid request, please try again.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "apivoid.py", "file_name": "apivoid.py", "file_ext": "py", "file_size_in_byte": 9282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 51, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 62, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 64, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 89, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 92, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 124, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 126, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 127, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 140, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 143, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 169, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 171, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 172, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 196, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 198, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 199, "usage_type": "call"}, {"api_name": "keyring.get_password", "line_number": 218, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 220, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "79358393", "text": "import urllib.request\nimport urllib.parse\nimport urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\nimport re\n\n# url_sample = 'http://py4e-data.dr-chuck.net/known_by_Fikret.html'\n# url_real = 'http://py4e-data.dr-chuck.net/known_by_Oakley.html'\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter url: ')\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'lxml')\n\ncount = int(input('Enter count: '))\nposition = int(input('Enter position: '))\n\n\nfor i in range(count):\n\n anchors = soup.findAll('a')\n\n link = anchors[position-1].get('href')\n\n print(re.findall('_([A-z]+).', link))\n\n url = link\n\n html = urllib.request.urlopen(url, context=ctx).read()\n\n soup = BeautifulSoup(html, 'lxml')\n", "sub_path": "week_4/follow_links.py", "file_name": "follow_links.py", "file_ext": "py", "file_size_in_byte": 801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "ssl.create_default_context", "line_number": 11, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 16, "usage_type": 
"attribute"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 33, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 33, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "521010156", "text": "#!/usr/bin/env python3\nimport datetime\n\n\ndef time_contro(func):\n def time_spend():\n begin = datetime.datetime.now()\n print(func())\n finish = datetime.datetime.now()\n return (finish - begin).seconds\n\n return time_spend\n\n\ndef num_input(): ###输入一个数字,输出数字(包含)以内的所有质数\n while True:\n try:\n num = int(input('请输入一个大于2的数字,将输出它以内所有质数: '))\n except ValueError:\n continue\n if num < 2:\n continue\n break\n return num\n\n\ndef num_save(): ### 将数字存入列表\n zhishu_list1 = []\n for i in range(2, int(num_input()) // 1 + 1):\n zhishu_list1.append(i)\n return zhishu_list1\n\n\n@time_contro\ndef num_filter(): ### 过滤非质数,根据数学原理,循环不需超过最大数的平方根\n num_list = num_save()\n zhishu_list = []\n while len(num_list) > 1 and num_list[0] ** 2 < num_list[-1]:\n zhishu_list.append(num_list[0])\n num_list = list(filter(lambda x: x % num_list[0], num_list))\n return zhishu_list + num_list\n\n\nif __name__ == '__main__':\n print(num_filter())\n", "sub_path": "day07/zhishu2.py", "file_name": "zhishu2.py", "file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datetime.datetime.now", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "475520634", "text": "import argparse\nimport json\nimport os\nimport pickle\n\nimport tensorflow as tf\nfrom tensorflow.contrib.data import Dataset\nfrom tensorflow.contrib.learn import RunConfig\nfrom tensorflow.contrib.training import HParams\nfrom tensorflow.python.estimator.estimator import Estimator\n\nfrom imsat.hook import IteratorInitializerHook\nfrom train import model_fn\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"ImSAT predictor.\")\n parser.add_argument(\"--model-dir\", dest=\"model_dir\", type=str,\n default=\"ckp-dir/selector_True-dropout_True-ctx2out_True-prev2out_True-lr_0.001\",\n help=\"Path of checkpoint.\")\n parser.add_argument(\"--batch-size\", dest=\"batch_size\", type=int, default=2,\n help=\"Batch size.\")\n parser.add_argument(\"--selector\", dest=\"selector\", action=\"store_true\",\n help=\"Flag of whether to use selector for context.\")\n parser.add_argument(\"--dropout\", dest=\"dropout\", action=\"store_true\",\n help=\"Flag of whether to use dropout.\")\n parser.add_argument(\"--ctx2out\", dest=\"ctx2out\", action=\"store_true\",\n help=\"Flag of whether to add context to output.\")\n parser.add_argument(\"--prev2out\", dest=\"prev2out\", action=\"store_true\",\n help=\"Flag of whether to add previous state to output.\")\n return parser\n\n\ndef get_input_fn():\n with open(\"data/annotations/captions_val2014.json\") as f:\n annotations = json.load(f)\n id_to_filename = {img['id']: img['file_name'] for 
img in annotations['images']}\n filenames = [os.path.join(\"image/val\", fn) for fn in id_to_filename.values()]\n\n def input_fn():\n with tf.variable_scope(\"input_fn\"), tf.device(\"/cpu:0\"):\n filename_dataset = Dataset.from_tensor_slices(list(filenames))\n\n def decode_image(filename):\n image = tf.image.decode_jpeg(tf.read_file(filename), channels=3)\n image = tf.image.resize_images(image, [224, 224])\n image = tf.to_float(image)\n return image\n\n image_dataset = filename_dataset.map(decode_image)\n return image_dataset, None\n\n return id_to_filename.keys(), input_fn\n\n\ndef main():\n parsed_args = get_parser().parse_args()\n with open(os.path.join(\"data\", 'word_to_idx.pkl'), 'rb') as f:\n word_to_idx = pickle.load(f)\n hparams = HParams(vocab_size=len(word_to_idx),\n batch_size=parsed_args.batch_size,\n selector=parsed_args.selector,\n dropout=parsed_args.dropout,\n ctx2out=parsed_args.ctx2out,\n prev2out=parsed_args.prev2out)\n run_config = RunConfig(model_dir=parsed_args.model_dir)\n estimator = Estimator(\n model_fn=model_fn,\n params=hparams,\n config=run_config)\n\n image_ids, input_fn = get_input_fn()\n val_init_hook = IteratorInitializerHook(\"infer\")\n\n idx_to_word = {v: k for k, v in word_to_idx.items()}\n del word_to_idx\n\n pred_results = estimator.predict(input_fn, hooks=[val_init_hook])\n all_predicions = []\n num_generated = 0\n for pred in pred_results:\n result = ' '.join([idx_to_word[idx] for idx in pred if idx != 0 and idx != 2])\n all_predicions.append(result)\n num_generated = num_generated + 1\n if num_generated % 1000 == 0:\n print(\"Generated %d\" % num_generated)\n\n total_results = [{\"image_id\": img_id, \"caption\": pred}\n for img_id, pred\n in zip(image_ids, all_predicions)]\n with open(\"result.json\", \"w\") as f:\n json.dump(total_results, f)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 3550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.contrib.data.Dataset.from_tensor_slices", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.contrib.data.Dataset", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.image.decode_jpeg", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.read_file", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.image.resize_images", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.contrib.training.HParams", "line_number": 60, "usage_type": "call"}, {"api_name": 
"tensorflow.contrib.learn.RunConfig", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.python.estimator.estimator.Estimator", "line_number": 67, "usage_type": "call"}, {"api_name": "train.model_fn", "line_number": 68, "usage_type": "name"}, {"api_name": "imsat.hook.IteratorInitializerHook", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "385220460", "text": "#!/usr/bin/env python3\n\n# ----------------- #\n# -- EXTRA TOOLS -- #\n# ----------------- #\n\nfrom json import dumps as json_dumps\n\nfrom mistool.os_use import cd, PPath, runthis\nfrom mistool.term_use import Step\nfrom orpyste.data import ReadBlock\n\nfrom .message import *\n\n\n# --------------- #\n# -- CONSTANTS -- #\n# --------------- #\n\nTHIS_DIR = PPath(__file__).parent\n\n\n# ----------------- #\n# -- LOCAL TOOLS -- #\n# ----------------- #\n\nMAIN_STEPS = Step()\n\n\n# ------------------ #\n# -- PEUF TO JSON -- #\n# ------------------ #\n\ndef projects_to_do(\n cookiecutter_temp,\n peuf_dir,\n all_peuf_choosen\n):\n projects_to_do = []\n already_build = []\n\n for onepeuf in all_peuf_choosen:\n with ReadBlock(\n content = onepeuf,\n mode = {\n \"keyval:: =\": \"project\"\n }\n ) as datas:\n flatdict = datas.mydict(\"std nosep nonb\")['project']\n\n# The key 'name' becomes 'project_name':\n flatdict['project_name'] = flatdict['name']\n del flatdict['name']\n\n\n# We have to listify values of keys starting by _.\n for k, v in flatdict.items():\n# Launches uses a very specific ways to store values.\n if k == \"_launched_by_factory\":\n command_n_patterns = [\n p.strip()\n for p in v.split(\":launch:\")\n ]\n\n command_n_patterns[1] = [\n p.strip()\n for p in command_n_patterns[1].split(\"|\")\n ]\n\n\n flatdict[k] = command_n_patterns\n\n# Keys using a coma sperated syntax in the peuf file.\n elif k in [\n \"_authors\",\n \"_for_test\",\n \"_for_factory\"\n ]:\n v = [\n [p.strip() for p in x.split(\",\")]\n for x in v.split(\";\")\n ]\n\n if k == \"_authors\":\n v = [\n [', '.join(x[:-1]), x[-1]]\n for x in v\n ]\n\n flatdict[k] = v\n\n# Ready to use list value.\n elif k[0] == \"_\":\n flatdict[k] = [x.strip() for x in v.split(\";\")]\n\n# Does we have nothing ?\n newprojectpath = cookiecutter_temp \\\n / (\n (onepeuf.parent / onepeuf.stem)\n -\n peuf_dir\n )\n\n if newprojectpath.is_dir():\n already_build.append(newprojectpath)\n\n# We have something to do.\n else:\n projects_to_do.append({\n 'lang' : onepeuf.parent.parent.name,\n 'kind' : onepeuf.parent.name,\n 'json' : flatdict,\n 'relpath': onepeuf.parent - peuf_dir,\n })\n\n# Some errors have been found.\n if already_build:\n error([\n \"Local project already build (erase it if you want to rebuild it): \"\n ] + [\n f\" + {p}\" for p in already_build\n ] + [\n '',\n \"Nothing has been done !\"\n ])\n\n exit(1)\n\n# Everything is ok.\n return projects_to_do\n\n\ndef cookifyles(\n cookiecutter_temp,\n peuf_dir,\n all_peuf_choosen\n):\n title(\"LET'S WORK...\")\n\n# Let's add the new json files.\n allprojects = projects_to_do(\n cookiecutter_temp,\n peuf_dir,\n all_peuf_choosen\n )\n\n for project in allprojects:\n SUB_STEPS = Step(\n start = 1,\n textit = lambda n, t: f\" {chr(96 + n)}/ {t}\"\n )\n\n projectreldir = f\"{project['relpath']}\" \\\n + f\"/{project['json']['project_name']}\"\n\n MAIN_STEPS(f\"Building {projectreldir}\")\n\n# Build the json file.\n SUB_STEPS(\"Updating the json file.\")\n\n jsonpath = cookiecutter_temp \\\n / project['relpath'] \\\n / 
'cookiecutter.json'\n\n with jsonpath.open(\n encoding = 'utf-8',\n mode = 'w'\n ) as f:\n f.write(json_dumps(project['json']))\n\n# Call of cookiecutter.\n SUB_STEPS(\"Trying to launch cookiecutter.\")\n\n cmddir = cookiecutter_temp / project['relpath']\n cmddir = cmddir.parent\n\n with cd(cmddir):\n try:\n runthis(\n f\"cookiecutter --no-input {project['kind']}\",\n showoutput = True\n )\n\n except Exception as e:\n print('\\nCookiecutter fails. See above why.')\n exit(1)\n\n SUB_STEPS(\"cookiecutter has done its local job.\")\n\n# Moving, or not, the folder.\n SUB_STEPS(\"Do not forget to move the new folder.\")\n\n# Open the cookie templates folder.\n print()\n\n title(f'Opening folder of the cookie templates')\n\n runthis(f'open \"{cookiecutter_temp}\"')\n\n print(\"Here are all the starting project build.\")\n\n SUB_STEPS = Step(\n start = 1,\n textit = lambda n, t: f\" {n}: {t}\"\n )\n\n for project in allprojects:\n SUB_STEPS(\n f\"{project['lang']}/{project['json']['project_name']}\"\n )\n\n print()\n", "sub_path": "initpro/pymodules/cookify.py", "file_name": "cookify.py", "file_ext": "py", "file_size_in_byte": 5338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "mistool.os_use.PPath", "line_number": 20, "usage_type": "call"}, {"api_name": "mistool.term_use.Step", "line_number": 27, "usage_type": "call"}, {"api_name": "orpyste.data.ReadBlock", "line_number": 43, "usage_type": "call"}, {"api_name": "mistool.term_use.Step", "line_number": 148, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "mistool.os_use.cd", "line_number": 177, "usage_type": "call"}, {"api_name": "mistool.os_use.runthis", "line_number": 179, "usage_type": "call"}, {"api_name": "mistool.os_use.runthis", "line_number": 198, "usage_type": "call"}, {"api_name": "mistool.term_use.Step", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "586553687", "text": "import time\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport statistics\r\nfrom random import shuffle\r\n\r\n\r\n# this function compare speed of generation random\r\ndef np_random_compare():\r\n count_list = []\r\n time_np = []\r\n time_random = []\r\n for count in range(1, 100):\r\n start_time = time.time()\r\n np.random.uniform(0, 1, size=count)\r\n finish_time = time.time() - start_time\r\n time_np.append(finish_time)\r\n count_list.append(count)\r\n\r\n for i in count_list:\r\n time_ = 0\r\n for j in range(1, i + 1):\r\n start_time = time.time()\r\n random.uniform(0, 1)\r\n finish_time = time.time() - start_time\r\n time_ += finish_time\r\n time_random.append(time_)\r\n\r\n plt.plot(time_np, color=\"red\")\r\n plt.plot(time_random, color=\"blue\")\r\n plt.xlabel('count of random values')\r\n plt.ylabel('time')\r\n plt.show()\r\n print('ok')\r\n\r\n\r\n# next 3 function provide bogosort\r\ndef check_sorted_bogosort(list_):\r\n return all(list_[i] <= list_[i + 1] for i in range(len(list_) - 1))\r\n\r\n\r\ndef bogosort(list_):\r\n while not check_sorted_bogosort(list_):\r\n shuffle(list_)\r\n return list_\r\n\r\n\r\ndef bogosort_init():\r\n size_list = np.arange(2, 10)\r\n time_temp_list = []\r\n mean_ = np.zeros(8)\r\n sd_ = np.zeros(8)\r\n\r\n for size in size_list:\r\n for repeat in range(10):\r\n data = np.random.randint(1, 100, size=size)\r\n start_time = time.time()\r\n bogosort(data)\r\n time_temp_list.append(time.time() - start_time)\r\n mean_[size - 2] = 
statistics.mean(time_temp_list)\r\n sd_[size - 2] = statistics.stdev(time_temp_list)\r\n time_temp_list = []\r\n\r\n plt.plot(size_list, mean_, color=\"red\")\r\n plt.fill_between(size_list, mean_ - sd_, mean_ + sd_)\r\n plt.xlabel('Size of array')\r\n plt.ylabel('Time')\r\n plt.show()\r\n print('ok')\r\n\r\n\r\n# random walk: 100 step to any direction. All dots joined by line.\r\ndef random_walk():\r\n x = [0]\r\n y = [0]\r\n n = 0\r\n while n < 100:\r\n step = np.random.randint(1, 5)\r\n if step == 1:\r\n x.append(x[-1] + 1)\r\n y.append(y[-1])\r\n elif step == 2:\r\n y.append(y[-1] + 1)\r\n x.append(x[-1])\r\n elif step == 3:\r\n x.append(x[-1] - 1)\r\n y.append(y[-1])\r\n elif step == 4:\r\n y.append(y[-1] - 1)\r\n x.append(x[-1])\r\n n += 1\r\n plt.scatter(x, y, color=\"red\", s=10)\r\n plt.scatter(0, 0, color=\"black\", s=20)\r\n plt.plot(x, y, color=\"blue\")\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n plt.title('Random Walk')\r\n plt.show()\r\n\r\n\r\n# this function draw Sierpinski triangle\r\ndef triangle():\r\n x = [0, 1, 2, 0]\r\n y = [0, 1.75, 0, 0]\r\n n = 0\r\n x_side = [np.random.uniform(0, 2)]\r\n y_side = [np.random.uniform(0, 2)]\r\n\r\n while n < 10000:\r\n direction = np.random.randint(0, 3)\r\n x_side.append(x_side[-1] - (x_side[-1] - x[direction]) / 2)\r\n y_side.append(y_side[-1] - (y_side[-1] - y[direction]) / 2)\r\n n += 1\r\n\r\n plt.scatter(x_side, y_side, color=\"red\", s=1)\r\n plt.plot(x, y, color=\"blue\")\r\n plt.title('Sierpinski triangle')\r\n plt.show()\r\n\r\n\r\n# this function draw Sierpinski carpet\r\ndef carpet():\r\n x = [0, 0, 1, 1, 0, 0.5, 0.5, 1]\r\n y = [0, 1, 0, 1, 0.5, 0, 1, 0.5]\r\n\r\n n = 0\r\n x_side = [np.random.uniform(0, 1)]\r\n y_side = [np.random.uniform(0, 1)]\r\n\r\n while n < 100000:\r\n direction = np.random.randint(0, 8)\r\n x_side.append((x_side[-1] + x[direction]) / 3)\r\n y_side.append((y_side[-1] + y[direction]) / 3)\r\n n += 1\r\n\r\n plt.scatter(x_side, y_side, color=\"red\", s=1)\r\n plt.title('Sierpiński carpet')\r\n plt.show()\r\n\r\n\r\n# this function shuffles letters inside a word\r\ndef text_spoil():\r\n text = input().split(' ')\r\n new_text = []\r\n for word in text:\r\n if len(word) > 1:\r\n word = list(word)\r\n new_order = word[1: (len(word) - 1)]\r\n shuffle(new_order)\r\n new_order.insert(0, word[0])\r\n new_order.append(word[-1])\r\n print(new_order)\r\n new_text.append(''.join(new_order))\r\n else:\r\n new_text.append(word)\r\n print(new_text)\r\n\r\n\r\nnp_random_compare()\r\nbogosort_init()\r\nrandom_walk()\r\ntriangle()\r\ncarpet()\r\ntext_spoil()\r\n", "sub_path": "random_all.py", "file_name": "random_all.py", "file_ext": "py", "file_size_in_byte": 4404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 25, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 61, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 79, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 108, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "20670756", "text": "from __future__ import annotations\n\nimport copy\nimport logging\nfrom collections import defaultdict\nfrom typing import Any\n\nimport numpy as np\nimport torch\nfrom numpy import floating\nfrom numpy.typing import NDArray\n\nfrom mpol.datasets import Dartboard, GriddedDataset\nfrom mpol.precomposed import SimpleNet\nfrom mpol.training import TrainTest\nfrom mpol.plot import split_diagnostics_fig\n\n\nclass CrossValidate:\n r\"\"\"\n Utilities to run a cross-validation loop (implicitly running a training\n optimization loop), in order to compare MPoL models with different\n hyperparameter values\n\n Parameters\n ----------\n coords : `mpol.coordinates.GridCoords` object\n Instance of the `mpol.coordinates.GridCoords` class.\n imager : `mpol.gridding.DirtyImager` object\n Instance of the `mpol.gridding.DirtyImager` class.\n kfolds : int, default=5\n Number of k-folds to use in cross-validation\n split_method : str, default='random_cell'\n Method to split full dataset into train/test subsets\n seed : int, default=None\n Seed for random number generator used in splitting data\n learn_rate : float, default=0.5\n Neural network learning rate\n epochs : int, default=10000\n Number of training iterations\n convergence_tol : float, default=1e-3\n Tolerance for training iteration stopping criterion as assessed by\n loss function (suggested <= 1e-3)\n regularizers : nested dict, default={}\n Dictionary of image regularizers to use. 
For each, a dict of the \n strength ('lambda', float), whether to guess an initial value for lambda \n ('guess', bool), and other quantities needed to compute their loss term.\n Example:\n {\"sparsity\":{\"lambda\":1e-3, \"guess\":False},\n \"entropy\": {\"lambda\":1e-3, \"guess\":True, \"prior_intensity\":1e-10}\n }\n train_diag_step : int, default=None\n Interval at which training diagnostics are output. If None, no\n diagnostics will be generated.\n split_diag_fig : bool, default=False\n Whether to generate a diagnostic figure of dataset splitting into\n train/test sets.\n store_cv_diagnostics : bool, default=False\n Whether to store diagnostics of the cross-validation loop.\n save_prefix : str, default=None\n Prefix (path) used for saved figure names. If None, figures won't be\n saved\n device : torch.device, default=None\n Which hardware device to perform operations on (e.g., 'cuda:0').\n 'None' defaults to current device.\n verbose : bool, default=True\n Whether to print notification messages.\n \"\"\"\n\n def __init__(self, coords, imager, kfolds=5, split_method=\"random_cell\",\n seed=None, learn_rate=0.5, epochs=10000, convergence_tol=1e-3,\n regularizers={}, train_diag_step=None, split_diag_fig=False, \n store_cv_diagnostics=False, save_prefix=None, device=None, \n verbose=True\n ):\n self._coords = coords\n self._imager = imager\n self._kfolds = kfolds\n self._split_method = split_method\n self._seed = seed\n self._learn_rate = learn_rate\n self._epochs = epochs\n self._convergence_tol = convergence_tol\n self._regularizers = regularizers\n self._train_diag_step = train_diag_step\n self._split_diag_fig = split_diag_fig\n self._store_cv_diagnostics = store_cv_diagnostics\n self._save_prefix = save_prefix\n self._device = device\n self._verbose = verbose\n\n self._model = None\n self._diagnostics = None\n self._split_figure = None\n self._train_figure = None\n\n def split_dataset(self, dataset):\n r\"\"\"\n Split a dataset into training and test subsets.\n\n Parameters\n ----------\n dataset : PyTorch dataset object\n Instance of the `mpol.datasets.GriddedDataset` class\n\n Returns\n -------\n split_iterator : iterator returning tuple\n Iterator that provides a (train, test) pair of \n :class:`~mpol.datasets.GriddedDataset` for each k-fold\n \"\"\"\n if self._split_method == \"random_cell\":\n split_iterator = RandomCellSplitGridded(\n dataset=dataset, k=self._kfolds, seed=self._seed\n )\n\n elif self._split_method == \"dartboard\":\n # create a radial and azimuthal partition for the dataset\n dartboard = Dartboard(coords=self._coords)\n\n # use 'dartboard' to split full dataset into train/test subsets\n split_iterator = DartboardSplitGridded(\n dataset, k=self._kfolds, dartboard=dartboard, seed=self._seed\n )\n\n else:\n supported_methods = [\"dartboard\", \"random_cell\"]\n raise ValueError(\n \"'split_method' {} must be one of \"\n \"{}\".format(self._split_method, supported_methods)\n )\n\n return split_iterator\n\n def run_crossval(self, dataset):\n r\"\"\"\n Run a cross-validation loop for a model obtained with a given set of\n hyperparameters.\n\n Parameters\n ----------\n dataset : dataset object\n Instance of the `mpol.datasets.GriddedDataset` class\n Returns\n -------\n cv_score : dict \n Dictionary with mean and standard deviation of cross-validation \n scores across all k-folds, and all raw scores\n \"\"\"\n all_scores = []\n if self._store_cv_diagnostics:\n self._diagnostics = defaultdict(list)\n\n split_iterator = self.split_dataset(dataset)\n if 
self._split_diag_fig:\n split_fig, split_axes = split_diagnostics_fig(split_iterator, save_prefix=self._save_prefix)\n self._split_figure = (split_fig, split_axes)\n\n for kk, (train_set, test_set) in enumerate(split_iterator):\n if self._verbose:\n logging.info(\n \"\\nCross-validation: k-fold {} of {}\".format(kk, self._kfolds)\n )\n\n # if hasattr(self._device,'type') and self._device.type == 'cuda': # TODO: confirm which objects need to be passed to gpu\n # train_set, test_set = train_set.to(self._device), test_set.to(self._device)\n\n # create a new model and optimizer for this k_fold\n self._model = SimpleNet(coords=self._coords, nchan=self._imager.nchan)\n # if hasattr(self._device,'type') and self._device.type == 'cuda': # TODO: confirm which objects need to be passed to gpu\n # self._model = self._model.to(self._device)\n\n optimizer = torch.optim.Adam(self._model.parameters(), lr=self._learn_rate)\n\n trainer = TrainTest(\n imager=self._imager,\n optimizer=optimizer,\n epochs=self._epochs,\n convergence_tol=self._convergence_tol,\n regularizers=self._regularizers,\n train_diag_step=self._train_diag_step,\n kfold=kk,\n save_prefix=self._save_prefix,\n verbose=self._verbose,\n )\n\n # run training \n loss, loss_history = trainer.train(self._model, train_set)\n\n if self._store_cv_diagnostics:\n self._diagnostics[\"loss_histories\"].append(loss_history) \n # update regularizer strength values\n self._regularizers = trainer.regularizers\n # store the most recent train figure for diagnostics\n self._train_figure = trainer.train_figure \n \n # run testing\n all_scores.append(trainer.test(self._model, test_set))\n\n # average individual test scores to get the cross-val metric for chosen\n # hyperparameters\n cv_score = {\n \"mean\": np.mean(all_scores),\n \"std\": np.std(all_scores),\n \"all\": all_scores,\n }\n\n return cv_score\n\n @property\n def model(self):\n \"\"\"SimpleNet class instance\"\"\"\n return self._model\n\n @property\n def regularizers(self):\n \"\"\"Dict containing regularizers used and their strengths\"\"\"\n return self._regularizers\n\n @property\n def diagnostics(self):\n \"\"\"Dict containing diagnostics of the cross-validation loop\"\"\"\n return self._diagnostics\n\n @property\n def split_figure(self):\n \"\"\"(fig, axes) of train/test splitting diagnostic figure\"\"\"\n return self._split_figure\n\n @property\n def train_figure(self):\n \"\"\"(fig, axes) of most recent training diagnostic figure\"\"\"\n return self._train_figure\n\n\nclass RandomCellSplitGridded:\n r\"\"\"\n Split a GriddedDataset into :math:`k` subsets. Inherit the properties of\n the GriddedDataset. This object creates an iterator providing a\n (train, test) pair of :class:`~mpol.datasets.GriddedDataset` for each\n k-fold.\n\n Parameters\n ----------\n dataset : PyTorch dataset object\n Instance of the `mpol.datasets.GriddedDataset` class\n k : int, default=5\n Number of k-folds (partitions) of `dataset`\n seed : int, default=None\n Seed for PyTorch random number generator used to shuffle data before\n splitting\n channel : int, default=0\n Channel of the dataset to use in determining the splits\n\n Notes\n -----\n Once initialized, iterate through the datasets like:\n >>> split_iterator = crossval.RandomCellSplitGridded(dataset, k)\n >>> for (train, test) in split_iterator: # iterate through `k` datasets\n >>> ... # working with the n-th slice of `k` datasets\n >>> ... # do operations with train dataset\n >>> ... 
# do operations with test dataset\n\n Treats `dataset` as a single-channel object with all data in `channel`.\n\n The splitting doesn't select (preserve) Hermitian pairs of visibilities.\n \"\"\"\n\n def __init__(self, dataset, k=5, seed=None, channel=0):\n self.dataset = dataset\n self.k = k\n self.channel = channel\n\n # get indices for cells in the top 1% of gridded weight\n # (we'll want all training sets to have these high SNR points)\n nvis = len(self.dataset.vis_indexed)\n nn = int(nvis * 0.01)\n # get the nn-th largest value in weight_indexed\n w_thresh = np.partition(self.dataset.weight_indexed, -nn)[-nn]\n self._top_nn = torch.argwhere(\n self.dataset.weight_gridded[self.channel] >= w_thresh\n ).T\n\n # mask these indices\n self.top_mask = torch.ones(\n self.dataset.weight_gridded[self.channel].shape, dtype=bool\n )\n self.top_mask[self._top_nn[0], self._top_nn[1]] = False\n # use unmasked cells that also have data for splits\n self.split_mask = torch.logical_and(\n self.dataset.mask[self.channel], self.top_mask\n )\n split_idx = torch.argwhere(self.split_mask).T\n\n # shuffle indices to prevent radial/azimuthal patterns in splits\n if seed is not None:\n torch.manual_seed(seed)\n shuffle = torch.randperm(split_idx.shape[1])\n split_idx = split_idx[:, shuffle]\n\n # split indices into k subsets\n self.splits = torch.tensor_split(split_idx, self.k, dim=1)\n\n def __iter__(self):\n # current k-slice\n self._n = 0\n return self\n\n def __next__(self):\n if self._n < self.k:\n test_idx = self.splits[self._n]\n train_idx = torch.cat(\n ([self.splits[x] for x in range(len(self.splits)) if x != self._n]),\n dim=1,\n )\n # add the masked (high SNR) points to the current training set\n train_idx = torch.cat((train_idx, self._top_nn), dim=1)\n\n train_mask = torch.zeros(\n self.dataset.weight_gridded[self.channel].shape, dtype=bool\n )\n test_mask = torch.zeros(\n self.dataset.weight_gridded[self.channel].shape, dtype=bool\n )\n train_mask[train_idx[0], train_idx[1]] = True\n test_mask[test_idx[0], test_idx[1]] = True\n\n # copy original dataset\n train = copy.deepcopy(self.dataset)\n test = copy.deepcopy(self.dataset)\n\n # use the masks to limit new datasets to only unmasked cells\n train.add_mask(train_mask)\n test.add_mask(test_mask)\n\n self._n += 1\n\n return train, test\n\n else:\n raise StopIteration\n\n\nclass DartboardSplitGridded:\n r\"\"\"\n Split a GriddedDataset into :math:`k` non-overlapping chunks, internally partitioned by a Dartboard. Inherit the properties of the GriddedDataset. This object creates an iterator providing a (train, test) pair of :class:`~mpol.datasets.GriddedDataset` for each k-fold.\n\n Args:\n griddedDataset (:class:`~mpol.datasets.GriddedDataset`): instance of the gridded dataset\n k (int): the number of subpartitions of the dataset\n dartboard (:class:`~mpol.datasets.Dartboard`): a pre-initialized Dartboard instance. If ``dartboard`` is provided, do not provide ``q_edges`` or ``phi_edges``.\n q_edges (1D numpy array): an array of radial bin edges to set the dartboard cells in :math:`[\\mathrm{k}\\lambda]`. If ``None``, defaults to 12 log-linearly radial bins stretching from 0 to the :math:`q_\\mathrm{max}` represented by ``coords``.\n phi_edges (1D numpy array): an array of azimuthal bin edges to set the dartboard cells in [radians]. 
If ``None``, defaults to 8 equal-spaced azimuthal bins stretched from :math:`0` to :math:`\\pi`.\n seed (int): (optional) numpy random seed to use for the permutation, for reproducibility\n\n Once initialized, iterate through the datasets like\n\n >>> cv = crossval.DartboardSplitGridded(dataset, k)\n >>> for (train, test) in cv: # iterate among k datasets\n >>> ... # working with the n-th slice of k datasets\n >>> ... # do operations with train dataset\n >>> ... # do operations with test dataset\n\n \"\"\"\n\n def __init__(\n self,\n gridded_dataset: GriddedDataset,\n k: int,\n dartboard: Dartboard | None = None,\n seed: int | None = None,\n ):\n if k <= 0:\n raise ValueError(\"k must be a positive integer\")\n\n if dartboard is None:\n dartboard = Dartboard(coords=gridded_dataset.coords)\n\n self.griddedDataset = gridded_dataset\n self.k = k\n self.dartboard = dartboard\n\n # 2D mask for any UV cells that contain visibilities\n # in *any* channel\n stacked_mask = torch.any(self.griddedDataset.mask, dim=0)\n\n # get qs, phis from dataset and turn into 1D lists\n qs = self.griddedDataset.coords.packed_q_centers_2D[stacked_mask]\n phis = self.griddedDataset.coords.packed_phi_centers_2D[stacked_mask]\n\n # create the full cell_list\n self.cell_list = self.dartboard.get_nonzero_cell_indices(qs, phis)\n\n # partition the cell_list into k pieces\n # first, randomly permute the sequence to make sure\n # we don't get structured radial/azimuthal patterns\n if seed is not None:\n np.random.seed(seed)\n\n self.k_split_cell_list = np.array_split(\n np.random.permutation(self.cell_list), k\n )\n\n @classmethod\n def from_dartboard_properties(\n cls,\n gridded_dataset: GriddedDataset,\n k: int,\n q_edges: NDArray[floating[Any]],\n phi_edges: NDArray[floating[Any]],\n seed: int | None = None,\n ) -> DartboardSplitGridded:\n \"\"\"\n Alternative method to initialize a DartboardSplitGridded object from Dartboard parameters.\n\n Args:\n griddedDataset (:class:`~mpol.datasets.GriddedDataset`): instance of the gridded dataset\n k (int): the number of subpartitions of the dataset\n q_edges (1D numpy array): an array of radial bin edges to set the dartboard cells in :math:`[\\mathrm{k}\\lambda]`. If ``None``, defaults to 12 log-linearly radial bins stretching from 0 to the :math:`q_\\mathrm{max}` represented by ``coords``.\n phi_edges (1D numpy array): an array of azimuthal bin edges to set the dartboard cells in [radians]. 
If ``None``, defaults to 8 equal-spaced azimuthal bins stretched from :math:`0` to :math:`\\pi`.\n seed (int): (optional) numpy random seed to use for the permutation, for reproducibility\n \"\"\"\n dartboard = Dartboard(gridded_dataset.coords, q_edges, phi_edges)\n return cls(gridded_dataset, k, dartboard, seed)\n\n def __iter__(self) -> DartboardSplitGridded:\n self.n = 0 # the current k-slice we're on\n return self\n\n def __next__(self) -> tuple[GriddedDataset, GriddedDataset]:\n if self.n < self.k:\n k_list = self.k_split_cell_list.copy()\n cell_list_test = k_list.pop(self.n)\n\n # put the remaining indices back into a full list\n cell_list_train = np.concatenate(k_list)\n\n # create the masks for each cell_list\n train_mask = self.dartboard.build_grid_mask_from_cells(cell_list_train)\n test_mask = self.dartboard.build_grid_mask_from_cells(cell_list_test)\n\n # copy original dateset\n train = copy.deepcopy(self.griddedDataset)\n test = copy.deepcopy(self.griddedDataset)\n\n # and use these masks to limit new datasets to only unmasked cells\n train.add_mask(train_mask)\n test.add_mask(test_mask)\n\n self.n += 1\n\n return train, test\n\n else:\n raise StopIteration\n", "sub_path": "src/mpol/crossval.py", "file_name": "crossval.py", "file_ext": "py", "file_size_in_byte": 17714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "mpol.datasets.Dartboard", "line_number": 119, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 152, "usage_type": "call"}, {"api_name": "mpol.plot.split_diagnostics_fig", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 161, "usage_type": "call"}, {"api_name": "mpol.precomposed.SimpleNet", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 173, "usage_type": "attribute"}, {"api_name": "mpol.training.TrainTest", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.partition", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.argwhere", "line_number": 280, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 285, "usage_type": "call"}, {"api_name": "torch.logical_and", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.argwhere", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.tensor_split", "line_number": 302, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 322, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 329, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 330, "usage_type": "call"}, {"api_name": "mpol.datasets.GriddedDataset", "line_number": 368, "usage_type": "name"}, {"api_name": "mpol.datasets.Dartboard", "line_number": 370, "usage_type": "name"}, {"api_name": "mpol.datasets.Dartboard", "line_number": 377, "usage_type": "call"}, {"api_name": "torch.any", "line_number": 385, "usage_type": "call"}, 
{"api_name": "numpy.random.seed", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 398, "usage_type": "attribute"}, {"api_name": "numpy.array_split", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 401, "usage_type": "attribute"}, {"api_name": "mpol.datasets.GriddedDataset", "line_number": 407, "usage_type": "name"}, {"api_name": "numpy.typing.NDArray", "line_number": 409, "usage_type": "name"}, {"api_name": "numpy.floating", "line_number": 409, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 409, "usage_type": "name"}, {"api_name": "numpy.typing.NDArray", "line_number": 410, "usage_type": "name"}, {"api_name": "numpy.floating", "line_number": 410, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 410, "usage_type": "name"}, {"api_name": "mpol.datasets.Dartboard", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 436, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 443, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 444, "usage_type": "call"}, {"api_name": "mpol.datasets.GriddedDataset", "line_number": 430, "usage_type": "name"}]} +{"seq_id": "600219003", "text": "# -*-coding:utf-8 -*-\nimport cv2\nimport time\nimport os\nimport glob\n#img save path\nsave_path = '/data2/maocaixia/data/video_data/imgs/'\nvideo_path = '/data2/maocaixia/data/video_data/video_data_3_floor/'\nfiles_video = os.listdir(video_path)\nnames = []\nfor file_v in files_video:\n tmp1,tmp2 = os.path.splitext(file_v)\n names.append(tmp1)\n#camera = cv2.VideoCapture(0)\n#if (camera.isOpened()):\n# print('Open')\n#else:\n# print('Open Fails')\nfor video_name in names:\n video_name_path = video_path + video_name + '.flv'\n camera = cv2.VideoCapture(video_name_path)\n#pos_msec = camera.get(cv2.CAP_PROP_POS_MSEC)\n#print(pos_msec)\n#size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),\n# int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n#print('size:'+repr(size))\n\n#fps = 20\n pre_frame = None\n num = 0\n frame_num = 0\n while(1):\n #start = time.time()\n ret, frame = camera.read()\n frame_num += 1\n #gray_lwpCV = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n pos_msec = camera.get(cv2.CAP_PROP_POS_MSEC)\n #print(pos_msec) \n\n if not ret:\n print('cant open video')\n break\n gray_lwpCV = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #end = time.time()\n #cv2.imshow(\"capture\", frame)\n #seconds = end - start\n #if seconds < 1.0 / fps:\n # time.sleep(1.0 / fps - seconds)\n gray_lwpCV = cv2.resize(gray_lwpCV, (500, 500))\n gray_lwpCV = cv2.GaussianBlur(gray_lwpCV, (21, 21), 0)\n\n if pre_frame is None:\n pre_frame = gray_lwpCV\n else:\n img_delta = cv2.absdiff(pre_frame, gray_lwpCV)\n thresh = cv2.threshold(img_delta, 25, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.dilate(thresh, None, iterations=2)\n image, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n if cv2.contourArea(c) < 1000:\n continue\n else:\n #print(cv2.contourArea(c))\n video_dir = save_path+str(video_name)\n if not os.path.exists(video_dir):\n os.mkdir(video_dir)\n #num += 1\n #print 'Saved images: %d\\r' % (num),\n #cv2.imwrite(save_path + str(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))) + '.jpg', frame)\n cv2.imwrite(video_dir+'/'+str(video_name)+'_'+str(frame_num)+'_'+str(int(pos_msec))+'.jpg', frame)\n break\n 
pre_frame = gray_lwpCV\n\n #if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n camera.release()\n #cv2.destroyAllWindows()\n", "sub_path": "motion_det.py", "file_name": "motion_det.py", "file_ext": "py", "file_size_in_byte": 2748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.listdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_MSEC", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.absdiff", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "182425849", "text": "import os\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset, Subset\n\n\nclass KittiLoader(Dataset):\n def __init__(self, root_dir, mode, transform=None):\n left_dir = os.path.join(root_dir, \"image_02/data/\")\n self.left_paths = sorted(\n [os.path.join(left_dir, fname) for fname in os.listdir(left_dir)]\n )\n if mode == \"train\":\n right_dir = os.path.join(root_dir, \"image_03/data/\")\n self.right_paths = sorted(\n [os.path.join(right_dir, fname) for fname in os.listdir(right_dir)]\n )\n assert len(self.right_paths) == len(self.left_paths)\n self.transform = transform\n self.mode = mode\n\n def __len__(self):\n return len(self.left_paths)\n\n def __getitem__(self, idx):\n left_image = Image.open(self.left_paths[idx])\n if self.mode == \"train\":\n right_image = Image.open(self.right_paths[idx])\n sample = {\"left_image\": left_image, \"right_image\": right_image}\n\n if self.transform:\n sample = self.transform(sample)\n return sample\n else:\n return sample\n else:\n if self.transform:\n left_image = self.transform(left_image)\n return left_image\n\n\ndef split(labels, fraction=0.8):\n real, = np.where(labels)\n fake, = np.where(~labels)\n s_real = int(len(real) * fraction)\n s_fake = int(len(fake) * fraction)\n train, valid = (\n np.concatenate([real[:s_real], fake[:s_fake]]),\n np.concatenate([real[s_real:], fake[s_fake:]]),\n )\n return np.sort(train), np.sort(valid)\n\n\nclass OrthancData(Dataset):\n def __init__(self, roor_dir, mode, transform=None, labels=(False, True)):\n import orthanc\n\n reader = orthanc.dataset.HDF5StereoReader(roor_dir, 1, 
num_frames=1)\n train, test = split(reader.labels)\n indices = np.full_like(reader.labels, False)\n for label in labels:\n indices |= reader.labels == label\n if mode == \"train\":\n indices &= np.isin(np.arange(len(indices)), train)\n else:\n indices &= np.isin(np.arange(len(indices)), test)\n pos, = indices.nonzero()\n\n self.mode = mode\n self.transform = transform\n self.reader = Subset(reader, pos)\n\n def __getitem__(self, item):\n stereo, _ = self.reader[item]\n left_image, right_image = stereo[0]\n left_image, right_image = (\n Image.fromarray(left_image),\n Image.fromarray(right_image),\n )\n if self.mode == \"train\":\n sample = {\"left_image\": left_image, \"right_image\": right_image}\n\n if self.transform:\n sample = self.transform(sample)\n return sample\n else:\n return sample\n else:\n if self.transform:\n left_image = self.transform(left_image)\n return left_image\n\n def __len__(self):\n return len(self.reader)\n", "sub_path": "data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 3059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 54, "usage_type": "name"}, {"api_name": "orthanc.dataset.HDF5StereoReader", "line_number": 58, "usage_type": "call"}, {"api_name": "orthanc.dataset", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.full_like", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 77, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 78, 
"usage_type": "name"}]} +{"seq_id": "30329440", "text": "# ------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# -------------------------------------------------------------------------\n\nfrom typing import Union\nfrom azure.core.credentials import TokenCredential, AzureKeyCredential\nfrom azure.core.credentials_async import AsyncTokenCredential\nfrom azure.core.pipeline.policies import (\n AsyncBearerTokenCredentialPolicy,\n BearerTokenCredentialPolicy,\n)\nfrom .._shared.policy import HMACCredentialsPolicy\n\n\ndef get_authentication_policy(\n endpoint: str,\n credential: Union[TokenCredential, AsyncTokenCredential, AzureKeyCredential, str],\n decode_url: bool = False,\n is_async: bool = False,\n):\n # type: (...) -> Union[AsyncBearerTokenCredentialPolicy, BearerTokenCredentialPolicy, HMACCredentialsPolicy]\n \"\"\"Returns the correct authentication policy based on which credential is being passed.\n\n :param endpoint: The endpoint to which we are authenticating to.\n :type endpoint: str\n :param credential: The credential we use to authenticate to the service\n :type credential: Union[TokenCredential, AsyncTokenCredential, AzureKeyCredential, str]\n :param bool decode_url: `True` if there is a need to decode the url. Default value is `False`\n :param bool is_async: For async clients there is a need to decode the url\n\n :return: Either AsyncBearerTokenCredentialPolicy or BearerTokenCredentialPolicy or HMACCredentialsPolicy\n :rtype: ~azure.core.pipeline.policies.AsyncBearerTokenCredentialPolicy or\n ~azure.core.pipeline.policies.BearerTokenCredentialPolicy or\n ~azure.communication.networktraversal.shared.policy.HMACCredentialsPolicy\n \"\"\"\n\n if credential is None:\n raise ValueError(\"Parameter 'credential' must not be None.\")\n if hasattr(credential, \"get_token\"):\n if is_async:\n return AsyncBearerTokenCredentialPolicy(\n credential, \"https://communication.azure.com//.default\" # type: ignore\n )\n return BearerTokenCredentialPolicy(\n credential, \"https://communication.azure.com//.default\" # type: ignore\n )\n if isinstance(credential, (AzureKeyCredential, str)):\n return HMACCredentialsPolicy(endpoint, credential, decode_url=decode_url)\n\n raise TypeError(\n f\"Unsupported credential: {format(type(credential))}. 
Use an access token string to use HMACCredentialsPolicy\"\n \"or a token credential from azure.identity\"\n )\n", "sub_path": "sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_shared/auth_policy_utils.py", "file_name": "auth_policy_utils.py", "file_ext": "py", "file_size_in_byte": 2607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.Union", "line_number": 19, "usage_type": "name"}, {"api_name": "azure.core.credentials.TokenCredential", "line_number": 19, "usage_type": "name"}, {"api_name": "azure.core.credentials_async.AsyncTokenCredential", "line_number": 19, "usage_type": "name"}, {"api_name": "azure.core.credentials.AzureKeyCredential", "line_number": 19, "usage_type": "name"}, {"api_name": "azure.core.pipeline.policies.AsyncBearerTokenCredentialPolicy", "line_number": 43, "usage_type": "call"}, {"api_name": "azure.core.pipeline.policies.BearerTokenCredentialPolicy", "line_number": 46, "usage_type": "call"}, {"api_name": "azure.core.credentials.AzureKeyCredential", "line_number": 49, "usage_type": "name"}, {"api_name": "_shared.policy.HMACCredentialsPolicy", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "2455530", "text": "import json\nimport collections\nimport xml.etree.ElementTree as ET\n\n# почитать про collections\n\ndef read_json(file, max_len_word=6, top_words=10):\n with open(file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n description_words = []\n for item in data['rss']['channel']['items']:\n description = [word for word in item['description'].split() if len(word) > max_len_word]\n description_words.extend(description)\n counter_words = collections.Counter(description_words)\n print(counter_words.most_common(top_words))\n\n\ndef read_xml(file, max_len_word=6, top_words=10):\n tree = ET.parse(file)\n news = tree.findall('.//description')\n\n description_words = []\n\n for description in news:\n description_words.extend([word for word in description.text.split() if len(word) > max_len_word])\n counter_words = collections.Counter(description_words)\n print(counter_words.most_common(top_words))\n\n\n", "sub_path": "modules/own_functions.py", "file_name": "own_functions.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 14, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 19, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 19, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "301785855", "text": "\nimport pymysql\nimport os\n\nclear = lambda: os.system('cls')\nconnection = pymysql.connect(host='jakedb.cveowrfw9i31.us-east-2.rds.amazonaws.com', port=3306, user = 'jbmitchell747', passwd='Charlie747', db='baseballstats', cursorclass=pymysql.cursors.DictCursor)\ndef location_search():\n\n try:\n g = str(input(\"What game are you looking for?\"))\n with connection.cursor() as cursor:\n # Read a single record\n sql = \"SELECT * FROM `summer2018` WHERE `location`=%s\"\n cursor.execute(sql, (g,))\n result = cursor.fetchall()\n for n in result:\n print(\"\\n\")\n print(n)\n finally:\n connection.close()\n\n\ndef main():\n look = True\n while look:\n location_search()\n look = 'y' in input(\"Look 
Again? (y/n)\").lower()\n clear()\nmain()\n", "sub_path": "SQLBaseball/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.system", "line_number": 5, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "325808521", "text": "import sys\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport numpy as np\nimport os\nfrom copy import deepcopy\nfrom tqdm import tqdm\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom .dice_loss import *\n\nclass train_model():\n def __init__(self,net_model,train_dataset,test_dataset,lr,trian_batch_size,test_batch_size,gpu,model_save_path,sample_save_path):\n self.net_model=net_model\n self.train_dataset=train_dataset\n self.test_dataset=test_dataset\n self.lr=lr\n self.trian_batch_size=trian_batch_size\n self.test_batch_size=test_batch_size\n self.gpu=gpu\n self.model_save_path=model_save_path\n self.sample_save_path=sample_save_path\n def __call__(self,epoch_num):\n self.train_model(net_model=self.net_model,\n epoch_num=epoch_num,\n batch_size=self.trian_batch_size,\n test_batch_size=self.test_batch_size,\n lr=self.lr,\n train_dataset=self.train_dataset,\n test_dataset=self.test_dataset,\n gpu=self.gpu,\n model_save_path=self.model_save_path,\n pic_save_path=self.sample_save_path,\n check_bound=50,\n log_path='./train_log')\n print('Train_complete')\n def tensor_to_pic(self,tensor,pic_type='RGB'):\n tensor=tensor.cpu()>0.5\n pil_img=transforms.ToPILImage()(tensor.float()).convert(pic_type)\n return(pil_img)\n def test_model(self,net_model,test_dataset,test_batch_size,sample_save_path,iter_number,save_sample,gpu,check_bound=20,brive_model=False):\n test_generator=test_dataset.get_batch()\n total_dice_acc=0\n test_batch_number=test_dataset.show_data_number()//test_batch_size\n for i in tqdm(range(test_batch_number)):\n x_test_img,y_test_img=next(test_generator)\n if brive_model and i%10!=0:\n continue\n if gpu:\n x_test_img=x_test_img.cuda()\n y_test_img=y_test_img.cuda()\n y_test_img_=net_model(x_test_img)\n if save_sample and i < check_bound:\n check_x=x_test_img.clone().cpu()[0]\n check_y=y_test_img.clone().cpu()[0]\n check_y_=y_test_img_.clone().cpu()[0] \n x_img=self.tensor_to_pic(check_x)\n y_img=self.tensor_to_pic(check_y)\n y_img_=self.tensor_to_pic(check_y_)\n y_img_=Image.fromarray(np.asarray(y_img_))\n x_img.save('{}/{}_data.png'.format(sample_save_path,i))\n y_img.save('{}/{}_label.png'.format(sample_save_path,i))\n y_img_.save('{}/{}_pred.png'.format(sample_save_path,i)) \n total_dice_acc+=MulticlassDiceAcc(y_test_img_,y_test_img).item()\n ave_dice_acc=total_dice_acc/test_batch_number\n if brive_model:\n ave_dice_acc=ave_dice_acc*10\n return(ave_dice_acc)\n def train_model(self,net_model,epoch_num,batch_size,test_batch_size,lr,train_dataset,test_dataset,gpu,model_save_path='./save_model/',pic_save_path='./save_pic/',check_bound=50,log_path='./train_log'):\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(pic_save_path):\n os.makedirs(pic_save_path)\n # print info about dataset\n print('='*40)\n print('EPOCH_NUMBER :{}'.format(epoch_num))\n print('BATCH_SIZE :{}'.format(batch_size))\n print('TRAIN_DATA_NUM :{}'.format(train_dataset.show_data_number()))\n 
print('TEST_DATA_NUM :{}'.format(test_dataset.show_data_number()))\n print('USE_GPU :{}'.format(gpu))\n net_model=nn.DataParallel(net_model.cuda(),device_ids=[0,1,2,3])\n print('='*40)\n log_file=open(log_path,'w')\n # build loss fun & optimizer\n criterion=nn.BCELoss()\n optimizer=optim.SGD(net_model.parameters(),lr=lr,momentum=0.9,weight_decay=0.0005)\n # save best model\n best_model=None\n best_acc=0\n best_epoch=None\n # build train and test generator\n train_generator=train_dataset.get_batch()\n # some stuff about test\n total_dice_acc=0\n # run epoch/batch\n for epoch in range(epoch_num):\n print('START EPOCH : {}/{}'.format(epoch,epoch_num))\n epoch_loss=0\n print('Start train')\n trian_batch_number=train_dataset.show_data_number()//batch_size\n for index in tqdm(range(trian_batch_number)):\n x_img, y_img=next(train_generator)\n if gpu:\n x_img=x_img.cuda()\n y_img=y_img.cuda()\n y_img_=net_model(x_img)\n flat_y_img_=y_img_.view(-1)\n flat_y_img=y_img.view(-1)\n batch_loss=criterion(flat_y_img_,flat_y_img)\n optimizer.zero_grad()\n batch_loss.backward()\n optimizer.step()\n epoch_loss+=batch_loss.item()\n\n iter_number=epoch*trian_batch_number+index\n if iter_number % 100 == 0:\n sample_save_path='{}/{}'.format(pic_save_path,iter_number)\n if not os.path.exists(sample_save_path):\n os.makedirs(sample_save_path)\n ave_dice_acc = self.test_model(net_model,\n test_dataset,\n test_batch_size,\n sample_save_path,\n iter_number=iter_number,\n save_sample=True,\n gpu= True,\n brive_model=True)\n log_file.write('{} {} {}\\n'.format(iter_number,batch_loss.item(),ave_dice_acc))\n log_file.flush()\n if ave_dice_acc>=best_acc:\n best_acc=ave_dice_acc\n best_model=deepcopy(net_model.state_dict())\n best_epoch=iter_number\n\n ave_dice_acc = self.test_model(net_model,\n test_dataset,\n test_batch_size,\n sample_save_path,\n iter_number=epoch*batch_size+index,\n save_sample=False,\n gpu= True)\n ave_train_loss=epoch_loss*batch_size/trian_batch_number\n print('='*45)\n print('Epoch number : {}'.format(epoch))\n print('Train Loss : {}'.format(ave_train_loss))\n print('Dice_Acc : {}'.format(ave_dice_acc))\n print('='*45)\n if epoch %5 == 0 :\n torch.save(best_model,'{}/{}_checkpoint.pth'.format(model_save_path,epoch))\n\n print('='*45)\n print('Epoch Fisihed : ')\n print('Best Val Epoch: {}'.format(best_epoch))\n print('Best Val Loss : {}'.format(best_acc))\n log_file.write('Best epoch :{} ; Best val acc : {}\\n'.format(best_epoch,best_acc))\n log_file.close()\n torch.save(best_model,'{}/Unet_iter_{}.pth'.format(model_save_path,best_epoch))\n\n", "sub_path": "TEST_MODEL/Unet_model/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torchvision.transforms.ToPILImage", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 88, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 121, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "565867347", "text": "from django.shortcuts import render, get_object_or_404, redirect, reverse\nfrom .models import Candidate, VoteRecord, Activity\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom blog.forms import SignupForm\nfrom blog.sql import find_user, update_user, insert_user, update_user_voted_time, find_user_is_voted\nfrom .decorators import custome_login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.core.paginator import Paginator\nimport datetime\n\n\n@csrf_exempt\ndef vote_index(request):\n candidate_list = Candidate.objects.all().order_by('-votes')\n candidate_count = candidate_list.count()\n paginator = Paginator(candidate_list, 5)\n if request.method == 'POST':\n data = {}\n page = int(request.POST.get('page'))\n candidate_list = paginator.get_page(page)\n\n if candidate_list.has_next():\n data['has_next'] = candidate_list.has_next()\n data['next_page_num'] = candidate_list.next_page_number()\n data['html'] = loader.render_to_string('basketball/lazy_load_candidates.html',\n {'candidate_list': candidate_list})\n return JsonResponse(data)\n else:\n activity = get_object_or_404(Activity, slug='basketball')\n activity.increase_views()\n candidate_list = paginator.get_page(1)\n votes_count = VoteRecord.objects.count()\n\n context = {'candidate_list': candidate_list,\n 'candidate_count': candidate_count,\n 'votes_count': votes_count,\n 'activity': activity}\n return render(request, 'basketball/template.html', context)\n\n\n@csrf_exempt\ndef lazy_load_candidates(request):\n if request.method == 'POST':\n data = {}\n page = int(request.POST.get('page', 1))\n candidate_list = Candidate.objects.all()[2:]\n paginator = Paginator(candidate_list, 2)\n candidate_list = paginator.get_page(page)\n if page > paginator.num_pages:\n data['stop_sign'] = True\n return JsonResponse(data)\n data['html'] = loader.render_to_string('basketball/lazy_load_candidates.html',\n {'candidate_list': candidate_list})\n return JsonResponse(data)\n\n\n@csrf_exempt\ndef search(request):\n if request.method == 'POST':\n data = {}\n name = request.POST.get('q')\n try:\n int(name)\n candidate_list = Candidate.objects.filter(id=name)\n except ValueError as e:\n candidate_list = Candidate.objects.filter(name__contains=name)\n if candidate_list.exists():\n data['result'] = 
loader.render_to_string('basketball/lazy_load_candidates.html',\n {'candidate_list': candidate_list})\n else:\n data['result'] = '没有搜索结果'\n return JsonResponse(data)\n\n\n@csrf_exempt\ndef vote(request, candidate_id):\n if request.method == 'POST':\n data = {}\n current_time = datetime.datetime.now()\n end_time = datetime.datetime(2018, 12, 1)\n if current_time > end_time:\n data['error_message'] = '亲,活动已经结束了'\n return JsonResponse(data)\n mobile = request.session.get('mobile_auth', None)\n is_voted = find_user_is_voted(mobile)\n if is_voted == 0:\n candidate = get_object_or_404(Candidate, pk=candidate_id)\n candidate.increase_votes()\n vote_record = VoteRecord(mobile=mobile, candidate=candidate)\n vote_record.save()\n update_user_voted_time(mobile)\n data['success_message'] = \"感谢你宝贵的一票\"\n else:\n data['error_message'] = '亲,一天只能投一票'\n return JsonResponse(data)\n\n\n@custome_login_required(login_url='/basketball/vote_login/')\ndef candidate_detail(request, candidate_id):\n candidate = get_object_or_404(Candidate, pk=candidate_id)\n vote_record_list = VoteRecord.objects.filter(candidate=candidate)\n context = {'candidate': candidate,\n 'vote_record_list': vote_record_list}\n return render(request, 'basketball/template_detail.html', context)\n\n\ndef vote_login(request):\n if request.method == 'POST':\n form = SignupForm(request.POST, request=request)\n if form.is_valid():\n try:\n del request.session['verify_code']\n except KeyError:\n pass\n dealer_id = request.session.get('dealer_id', None)\n phone = form.cleaned_data['phone']\n row = find_user(phone)\n if row:\n if dealer_id is not None:\n update_user(phone, dealer_id)\n else:\n insert_user(phone, dealer_id)\n request.session['mobile_auth'] = phone\n redirect_to = request.POST.get(\n 'next',\n request.GET.get('next', '')\n )\n print(redirect_to)\n return HttpResponseRedirect(redirect_to)\n\n else:\n dealer_id = request.GET.get('dealer', None)\n request.session['dealer_id'] = dealer_id\n form = SignupForm()\n\n return render(request, 'basketball/vote_login.html', {'form': form})", "sub_path": "basketball/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "models.Candidate.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Candidate.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Candidate", "line_number": 17, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 19, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 28, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 28, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Activity", "line_number": 32, "usage_type": "argument"}, {"api_name": "models.VoteRecord.objects.count", "line_number": 35, "usage_type": "call"}, {"api_name": "models.VoteRecord.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.VoteRecord", "line_number": 35, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Candidate.objects.all", "line_number": 
49, "usage_type": "call"}, {"api_name": "models.Candidate.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.Candidate", "line_number": 49, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 55, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 55, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 57, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Candidate.objects.filter", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Candidate.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Candidate", "line_number": 67, "usage_type": "name"}, {"api_name": "models.Candidate.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Candidate.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Candidate", "line_number": 69, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 71, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 71, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 75, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 60, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 86, "usage_type": "call"}, {"api_name": "blog.sql.find_user_is_voted", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Candidate", "line_number": 90, "usage_type": "argument"}, {"api_name": "models.VoteRecord", "line_number": 92, "usage_type": "call"}, {"api_name": "blog.sql.update_user_voted_time", "line_number": 94, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 78, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 103, "usage_type": "call"}, {"api_name": "models.Candidate", "line_number": 103, "usage_type": "argument"}, {"api_name": "models.VoteRecord.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "models.VoteRecord.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.VoteRecord", "line_number": 104, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 107, "usage_type": "call"}, {"api_name": "decorators.custome_login_required", "line_number": 101, "usage_type": "call"}, {"api_name": "blog.forms.SignupForm", "line_number": 112, "usage_type": "call"}, {"api_name": "blog.sql.find_user", "line_number": 120, "usage_type": "call"}, {"api_name": "blog.sql.update_user", "line_number": 123, "usage_type": "call"}, {"api_name": "blog.sql.insert_user", "line_number": 125, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 132, "usage_type": "call"}, {"api_name": 
"blog.forms.SignupForm", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "491284648", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor, wait\n\nCATEGORIES = ('best movies', 'action', 'comedy', 'horror')\n\nURL_SEARCH_ID = \"http://localhost:8000/api/v1/titles/{}\"\nURL_MOVIE = \"http://localhost:8000/api/v1/titles/?sort_by=-imdb_score\"\nURL_CATEGORY = \"http://localhost:8000/api/v1/titles/?min_year=2000&genre={}&sort_by=-imdb_score&page={}\"\nURL_BEST_MOVIES = \"http://localhost:8000/api/v1/titles/?sort_by=-imdb_score&page={}\"\n\n\ndef load_url(url):\n '''Retourne les données de la requête sur l'API '''\n response = requests.get(url, stream=True)\n return response.json()\n\ndef get_urls():\n '''Récupère toutes les URLs lié au catégories choisi'''\n urls = []\n for category in CATEGORIES:\n if category != 'best movies':\n [urls.append(URL_CATEGORY.format(category, i+1)) for i in range(2)]\n else:\n [urls.append(URL_BEST_MOVIES.format(i+1)) for i in range(2)]\n return urls\n \ndef load_categories(id):\n '''\n Charge les données de l'API en utilisant le multithreading pour obtenir\n plus vite les informations.\n '''\n futures = []\n data = {category: [] for category in CATEGORIES}\n\n # On charge simultanement les données en stockant leur etat dans \"futures\"\n with ThreadPoolExecutor() as executor:\n for url in get_urls():\n try:\n futures.append(executor.submit(load_url, url))\n except Exception:\n print(\"error appear with url: {}\".format(url))\n\n best_movie_in = False\n for i, future in enumerate(futures):\n category = CATEGORIES[i//2]\n movies = []\n\n # On récupère le résultats de notre requête stocké dans la \"future\" qui a\n # terminé son chargement et on récupère l'ID et l'image du film.\n result = future.result()\n for r in result['results'] if i % 2 == 0 else result['results'][:(3 if best_movie_in else 2)]:\n if r['id'] != id:\n if len(movies) < 7:\n movies.append((r['id'], r['image_url']))\n else:\n best_movie_in = True\n\n print(\"nombre movies: {}\".format(len(movies)))\n data[category] += movies[:]\n\n best_movie_in = False if (i % 2 == 1) else best_movie_in\n\n return data\n\ndef load_best_movie():\n '''Charge les données de l'API pour trouver le meilleur film.'''\n response = requests.get(URL_MOVIE)\n if response.ok:\n response = response.json()\n id, score, vote = -1, 0, -1\n for movie in response['results'][:4]:\n if float(movie['imdb_score']) > score:\n id, score, vote = movie['id'], float(movie['imdb_score']), movie['votes']\n elif float(movie['imdb_score']) == score:\n if movie['votes'] > vote:\n id, score, vote = movie['id'], float(movie['imdb_score']), movie['votes']\n else:\n print('Error on load the best movie.')\n \n response = requests.get(URL_SEARCH_ID.format(id))\n if response.ok:\n response = response.json()\n return {\n 'id': response['id'],\n 'img': response['image_url'],\n 'genres': response['genres'],\n 'plot': response['long_description'],\n 'title': response['original_title'],\n }\n else:\n print('Error on load the best movie.')\n\n\n\n\n", "sub_path": "requestsAPI.py", "file_name": "requestsAPI.py", "file_ext": "py", "file_size_in_byte": 3418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", 
"line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "194226767", "text": "from textblob import TextBlob\nimport matplotlib.pyplot as plt\nimport nltk\nimport tweepy\n\n#Code\n\nconsumer_key = 'XXXXXXXXXXXXXXXXXXXXX'\nconsumer_secret = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\naccess_token = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\naccess_token_secret = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'\n\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_token_secret)\napi = tweepy.API(auth)\n\nsearched_tweets = []\nlast_id = -1\nwhile len(searched_tweets) < 10000:\n counting = 1000 - len(searched_tweets)\n new_tweets = api.search('AMLO',count=count, max_id=str(last_id - 1),lang='en')\n if not new_tweets:\n break\n searched_tweets.extend(new_tweets)\n last_id = new_tweets[-1].id\n\nprinting = 100 \nfor tweets in searched_tweets:\n if printing <= 0:\n break;\n printing = printing -1\n nword = TextBlob(tweets.text)\n ##if nword.detect_language() != 'en':\n ##nword = nword.translate(to='en')\n print(nword)\n\nmean = 0\ncount = 0\nbad = 0\ngood = 0\nnoOpinion = 0\nPloting = []\nfor tweets in searched_tweets:\n word = TextBlob(tweets.text)\n \n if word.sentiment.polarity > 0:\n good = good + 1\n \n elif word.sentiment.polarity == 0:\n noOpinion = noOpinion +1\n \n else:\n bad = bad + 1 \n Ploting.append(word.sentiment.polarity) \t \n #print(word.sentiment)\n mean = mean + word.sentiment.polarity\n count = count + 1\nmean = mean/count\nprint(\"\\nAnalysis based on: \",count,\" tweets\")\nprint(\"Positive Opinion: \",good)\nprint(\"Negative opinion: \",bad)\nprint(\"No opinion: \",noOpinion)\nprint(\"Sentiment Mean value: \",mean)\n\nplt.plot(Ploting)\nplt.ylabel('Sentiment')\nplt.xlabel('Number of tweets')\nplt.show()\n\n", "sub_path": "AMLO-twitter/Sentiment-analysis.py", "file_name": "Sentiment-analysis.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 15, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 32, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "89944133", "text": "#!/usr/bin/env python3\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Student(object):\n\n BARCODE_LENGTH = 9 # 8 digits + 1 letter\n\n def __init__(self, student_id, user_id=\"\", name=\"\", identification_code=\"\", student_code=\"\", courses=[], rfid_code=''):\n self.student_id = student_id\n self.user_id = user_id\n self.name = name\n self.identification_code = identification_code\n 
self.student_code = str(student_code)\n self.courses = courses\n self.rfid_code = rfid_code\n\n def get_barcode(self):\n if self.student_code:\n previous_zeros = self.BARCODE_LENGTH - len(self.student_code)\n prefix = \"0\" * previous_zeros\n return prefix + self.student_code\n else:\n return self.identification_code\n\n def is_in_course(self, course_name):\n return course_name in self.courses\n\n def to_array(self):\n return [self.name, self.identification_code, self.student_code, self.rfid_code]\n\n def export_to_csv(self):\n barcode = self.get_barcode()\n csv_line = \"{},{},{},{},{}\".format(self.name, self.identification_code, self.student_code, barcode, self.rfid_code)\n return csv_line", "sub_path": "src/student.py", "file_name": "student.py", "file_ext": "py", "file_size_in_byte": 1223, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "262681663", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import BloodBankName,PathlabName\n\ndef index(request):\n\tBn = BloodBankName.objects.all()\n\tBloodbank = []\n\tlatitude = []\n\tlongitude = []\t\t\n\tfor i in range(0,len(Bn)):\n\t\tBloodbank.append(Bn[i].bloodbankname)\n\t\tlatitude.append(Bn[i].latitude)\n\t\tlongitude.append(Bn[i].longitude)\t\n\treturn render(request, 'maps/map.html',{'names':Bloodbank,'lat':latitude,'lng':longitude})\n\t\ndef index1(request):\n\tBn = PathlabName.objects.all()\n\tPathlab = []\n\tlatitude = []\n\tlongitude = []\t\t\n\tfor i in range(0,len(Bn)):\n\t\tPathlab.append(Bn[i].pathlabname)\n\t\tlatitude.append(Bn[i].latitude)\n\t\tlongitude.append(Bn[i].longitude)\t\n\treturn render(request, 'maps/map1.html',{'names':Pathlab,'lat':latitude,'lng':longitude})\n", "sub_path": "bloodbank/maps/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "models.BloodBankName.objects.all", "line_number": 6, "usage_type": "call"}, {"api_name": "models.BloodBankName.objects", "line_number": 6, "usage_type": "attribute"}, {"api_name": "models.BloodBankName", "line_number": 6, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "models.PathlabName.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "models.PathlabName.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.PathlabName", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "580565397", "text": "# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\carbon\\common\\script\\cef\\componentViews\\interiorPlaceableComponentView.py\nfrom carbon.common.script.cef.baseComponentView import BaseComponentView\nimport blue\n\nclass InteriorPlaceableComponentView(BaseComponentView):\n __guid__ = 'cef.InteriorPlaceableComponentView'\n __COMPONENT_ID__ = const.cef.INTERIOR_PLACEABLE_COMPONENT_ID\n __COMPONENT_DISPLAY_NAME__ = 'InteriorPlaceable'\n __COMPONENT_CODE_NAME__ = 'interiorPlaceable'\n __SHOULD_SPAWN__ = {'client': True}\n __DESCRIPTION__ = 'Contains the graphicID and handles the loading for placeable objects'\n GRAPHIC_ID = 'graphicID'\n 
MIN_SPEC_MATERIAL_PATH = 'minSpecOverideMetaMaterialPath'\n OVERRIDE_MATERIAL_PATH = 'overrideMetaMaterialPath'\n\n @classmethod\n def SetupInputs(cls):\n cls.RegisterComponent(cls)\n cls._AddInput(cls.GRAPHIC_ID, -1, cls.RECIPE, const.cef.COMPONENTDATA_GRAPHIC_ID_TYPE, displayName='Graphic ID')\n cls._AddInput(cls.MIN_SPEC_MATERIAL_PATH, None, cls.RECIPE, const.cef.COMPONENTDATA_STRING_TYPE, displayName='MinSpec Material Path', fileTypes='Red Files (*.red)|*.red')\n cls._AddInput(cls.OVERRIDE_MATERIAL_PATH, None, cls.RECIPE, const.cef.COMPONENTDATA_STRING_TYPE, displayName='Override Material Path', fileTypes='Red Files (*.red)|*.red')\n cls._AddInput('probeOffsetX', 0.0, cls.RECIPE, const.cef.COMPONENTDATA_FLOAT_TYPE, displayName='SH Probe Offset X')\n cls._AddInput('probeOffsetY', 0.0, cls.RECIPE, const.cef.COMPONENTDATA_FLOAT_TYPE, displayName='SH Probe Offset Y')\n cls._AddInput('probeOffsetZ', 0.0, cls.RECIPE, const.cef.COMPONENTDATA_FLOAT_TYPE, displayName='SH Probe Offset Z')\n cls._AddInput('depthOffset', 0.0, cls.RECIPE, const.cef.COMPONENTDATA_FLOAT_TYPE, displayName='Transparency Depth Offset')\n cls._AddInput('scale', '(1.0,1.0,1.0)', cls.RECIPE, const.cef.COMPONENTDATA_FLOAT_VECTOR_AS_STRING_TYPE, displayName='Scale')\n return\n\n @classmethod\n def ValidateComponent(cls, result, recipeID, recipeDict):\n materialPath = recipeDict[cls.__COMPONENT_ID__][cls.MIN_SPEC_MATERIAL_PATH]\n if materialPath is not None and not blue.paths.exists(materialPath):\n result.AddMessage('MisSpec material path is invalid: \"%s\"' % materialPath)\n overridePath = recipeDict[cls.__COMPONENT_ID__][cls.OVERRIDE_MATERIAL_PATH]\n if overridePath is not None and not blue.paths.exists(overridePath):\n result.AddMessage('Override material path is invalid: \"%s\"' % overridePath)\n scale = recipeDict[cls.__COMPONENT_ID__]['scale']\n if scale != '(1.0,1.0,1.0)' and const.cef.COLLISION_MESH_COMPONENT_ID in recipeDict:\n result.AddMessage('Cannot scale a placeable with collision')\n return\n\n\nInteriorPlaceableComponentView.SetupInputs()", "sub_path": "client/carbon/common/script/cef/componentViews/interiorPlaceableComponentView.py", "file_name": "interiorPlaceableComponentView.py", "file_ext": "py", "file_size_in_byte": 2926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "carbon.common.script.cef.baseComponentView.BaseComponentView", "line_number": 6, "usage_type": "name"}, {"api_name": "blue.paths.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "blue.paths", "line_number": 33, "usage_type": "attribute"}, {"api_name": "blue.paths.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "blue.paths", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "184035080", "text": "#students learning the RGB through creating a grayscale filter\n\nfrom PIL import Image\n\npath = \"sanfran.jpg\"\n\nimg = Image.open(path)\nwidth, height = img.size\ngray = Image.new('L', (width, height))\n\n\n\nfor x in range(width):\n for y in range(height):\n r, g, b = img.getpixel((x, y))\n value = r * 299.0/1000 + g * 587.0/1000 + b * 114.0/1000\n value = int(value)\n gray.putpixel((x, y), value)\n\ngray.save(\"GrayScaleImageOfSanFran.jpg\")\n", "sub_path": "2_Handcraft_Iteration_Games/9_levelofdifficulty.py", "file_name": "9_levelofdifficulty.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": 
"PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "486467110", "text": "#\n# 23843번: 콘센트\n# https://www.acmicpc.net/problem/23843\n# Version: Python 3.9.7\n#\n# Created by WhiteHyun on 2021/12/18.\n#\n\n\nfrom sys import stdin\nfrom collections import Counter\n\nread = stdin.readline\n\nif __name__ == \"__main__\":\n n, m = map(int, read().split())\n device_time_list = sorted(map(int, read().split()))\n time_counter = Counter(device_time_list)\n outlet_list = [0] * m # 콘센트\n time = 0\n while device_time_list: # 전자기기를 다 충전할 때까지 반복\n for i in range(m):\n if outlet_list[i] == 0:\n outlet_list[i] = device_time_list.pop()\n break\n\n if 0 not in outlet_list: # 남는 콘센트가 없다면\n min_left_time = min(outlet_list) # 가장 빨리 끝나는 값만큼 시간이 지나야함\n for i in range(m):\n outlet_list[i] -= min_left_time\n\n time += min_left_time\n\n # 전자기기를 충전기에 다 꽃았고 나머지가 충전되어 있는 경우\n if outlet_list:\n time += max(outlet_list)\n print(time)\n", "sub_path": "boj/gold5/23843.py", "file_name": "23843.py", "file_ext": "py", "file_size_in_byte": 1094, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.stdin.readline", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 13, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "65735122", "text": "from celery import Celery\nimport sys\nimport glob\nsys.path.insert(1, '/cry_processor/')\n\nfrom cry_processor import Crylauncher\nfrom os import path\nimport os\nimport subprocess\nimport shutil\n\napp = Celery(\"tasks\", broker=\"redis://redis:6379/\", backend=\"redis://redis:6379/\")\n\napp.conf.default_queue = 'cry_py'\n\n@app.task\ndef cryprocess(run_mode, fi, fr, rr, meta, wd, th):\n hm, pr, ma, r, a, nu, mra, k, s, f = '', 1, '', 'do', False, '', False, 21, True, True\n od = path.join(wd, 'cry')\n final_result_dir = path.join(wd, 'cry_processor')\n od_file = path.join(wd, 'cry_result.zip')\n fi = path.join(wd, fi)\n fr = path.join(wd, fr)\n rr = path.join(wd, rr)\n if run_mode == 'proteins':\n fr, rr = '', ''\n else:\n fi = ''\n if meta:\n meta = True\n Crylauncher.LaunchProcessor(od, fi, hm, pr, th, ma, r, a, nu, mra, k, fr, rr, meta, s, f)\n\n os.mkdir(final_result_dir)\n os.replace(glob.glob(path.join(od, 'raw_full_*'))[0], path.join(final_result_dir, 'full_toxins.fasta'))\n os.replace(glob.glob(path.join(od, 'proteins_domain_mapping_full_*'))[0], path.join(final_result_dir, 'full_toxins.bed'))\n os.replace(path.join(od, 'logs', 'cry_processor.log'), path.join(final_result_dir, 'summary_log.txt'))\n os.replace(glob.glob(path.join(od, 'logs', 'diamond_matches_*'))[0], path.join(final_result_dir, 'diamond_classification.txt'))\n subprocess.call(\"cwd=$PWD; cd {2}; zip -r {0} {1}; cd $cwd\".format('cry_result.zip', 'cry_processor', wd), shell=True)\n shutil.rmtree(od)\n shutil.rmtree(final_result_dir)\n return wd", "sub_path": "containers/tasks/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 1579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.insert", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": 
"celery.Celery", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "cry_processor.Crylauncher.LaunchProcessor", "line_number": 31, "usage_type": "call"}, {"api_name": "cry_processor.Crylauncher", "line_number": 31, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.replace", "line_number": 34, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "name"}, {"api_name": "os.replace", "line_number": 35, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "name"}, {"api_name": "os.replace", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "name"}, {"api_name": "os.replace", "line_number": 37, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 38, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 39, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "152832496", "text": "\"\"\"ayongampus URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom ayongampus import settings\nfrom ayongampus.activities import views as activities_views\nfrom ayongampus.authentication import views as ayongampus_auth_views\nfrom ayongampus.core import views as core_views\nfrom ayongampus.quiz.essay.views import EssayQuestionCreateView, EssayQuestionUpdateView\nfrom ayongampus.quiz.multichoice.views import MCQuestionAnswerCreate, MCQuestionAnswerUpdate\nfrom ayongampus.quiz.truefalse.views import TFQuestionCreateView, TFQuestionUpdateView\nfrom ayongampus.quiz.views import quiz_list, quiz_create, quiz_update, quiz_delete, quiz_detail, QuizQuestionCreateView, \\\n remove_question_from_quiz, QuestionListView, QuestionDeleteView, QuizUserProgressView, QuizMarkingList, \\\n QuizMarkingDetail\nfrom ayongampus.search import views as search_views\nfrom ayongampus.subject.views import SubjectChapterCreate, SubjectChapterUpdate, SubjectListView, SubjectDeleteView, \\\n ChapterAutocomplete, overview\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n url(r'^pages/', include('django.contrib.flatpages.urls')),\n url(r'^4dm1n/', admin.site.urls),\n url(r'^quiz/', include('ayongampus.quiz.urls')),\n\n url(r'^progress/$', QuizUserProgressView.as_view(), name='quiz_progress'),\n\n url(r'^dashboard/$', overview, name='core_overview'),\n\n url(r'^dashboard/quizzes/$', quiz_list, name='core_quiz_list'),\n url(r'^dashboard/quizzes/create/$', quiz_create, name='core_quiz_create'),\n url(r'^dashboard/quizzes/(?P\\d+)/update/$', quiz_update, name='core_quiz_update'),\n url(r'^dashboard/quizzes/(?P\\d+)/delete/$', quiz_delete, name='core_quiz_delete'),\n url(r'^dashboard/quizzes/(?P[\\w-]+)/$', quiz_detail, name='core_quiz_detail'),\n\n url(r'^dashboard/quizzes/(?P\\d+)/q/$', QuizQuestionCreateView.as_view(), name='core_question_add'),\n url(r'^dashboard/quizzes/(?P[0-9]+)/delete/(?P[0-9]+)/$', remove_question_from_quiz,\n name='core_remove_question_from_quiz'),\n\n url(r'^dashboard/chapter-autocomplete/$', ChapterAutocomplete.as_view(), name='chapter-autocomplete', ),\n\n url(r'dashboard/subjects/$', SubjectListView.as_view(), name='core_subject_list'),\n url(r'dashboard/subjects/add/$', SubjectChapterCreate.as_view(), name='core_subject_add'),\n url(r'dashboard/subjects/(?P[0-9]+)/update/$', SubjectChapterUpdate.as_view(), name='core_subject_update'),\n url(r'dashboard/subjects/(?P[0-9]+)/delete/$', SubjectDeleteView.as_view(), name='core_subject_delete'),\n\n url(r'^dashboard/questions/mcq/$', MCQuestionAnswerCreate.as_view(), name='core_mcquestion_add'),\n url(r'^dashboard/questions/mcq/(?P[0-9]+)/$', MCQuestionAnswerUpdate.as_view(),\n name='core_mcquestion_update'),\n\n url(r'^dashboard/questions/$', QuestionListView.as_view(), name='core_question_list'),\n url(r'^dashboard/questions/(?P[0-9]+)/delete/$', QuestionDeleteView.as_view(), name='core_question_delete'),\n url(r'^dashboard/questions/tfq/$', TFQuestionCreateView.as_view(), name='core_tfquestion_add'),\n url(r'^dashboard/questions/tfq/(?P[0-9]+)/update/$', TFQuestionUpdateView.as_view(),\n name='core_tfquestion_update'),\n\n url(r'^dashboard/questions/essay/$', EssayQuestionCreateView.as_view(), name='core_essay_add'),\n url(r'^dashboard/questions/essay/(?P[0-9]+)/update/$', EssayQuestionUpdateView.as_view(),\n name='core_essay_update'),\n url(r'^', include('password_reset.urls')),\n\n 
url(r'^dashboard/marking/$', QuizMarkingList.as_view(), name='core_quiz_marking'),\n url(r'^dashboard/marking/(?P[\\d.]+)/$', QuizMarkingDetail.as_view(), name='core_quiz_marking_detail'),\n\n # bootcamp\n url(r'^$', core_views.home, name='home'),\n url(r'^login', auth_views.login, {'template_name': 'core/cover.html'},\n name='login'),\n url(r'^logout', auth_views.logout, {'next_page': '/'}, name='logout'),\n url(r'^signup/$', ayongampus_auth_views.signup, name='signup'),\n url(r'^settings/$', core_views.settings, name='settings'),\n url(r'^settings/picture/$', core_views.picture, name='picture'),\n url(r'^settings/upload_picture/$', core_views.upload_picture,\n name='upload_picture'),\n url(r'^settings/save_uploaded_picture/$', core_views.save_uploaded_picture,\n name='save_uploaded_picture'),\n url(r'^settings/password/$', core_views.password, name='password'),\n\n url(r'^feeds/', include('ayongampus.feeds.urls')),\n url(r'^questions/', include('ayongampus.questions.urls')),\n url(r'^articles/', include('ayongampus.articles.urls')),\n url(r'^messages/', include('ayongampus.messenger.urls')),\n url(r'^notifications/$', activities_views.notifications,\n name='notifications'),\n url(r'^notifications/last/$', activities_views.last_notifications,\n name='last_notifications'),\n url(r'^notifications/check/$', activities_views.check_notifications,\n name='check_notifications'),\n url(r'^search/$', search_views.search, name='search'),\n url(r'^quiz/', include('ayongampus.quiz.urls')),\n url(r'^members/', include('follow.urls')),\n\n url(r'^i18n/', include('django.conf.urls.i18n', namespace='i18n')),\n\n url(r'^(?P[^/]+)/$', core_views.profile, name='profile'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n\nadmin.site.site_header = 'Ayongampus Administration'", "sub_path": "ayongampus/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 6186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizUserProgressView.as_view", "line_number": 39, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizUserProgressView", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.overview", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.quiz_list", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.quiz_create", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": 
"ayongampus.quiz.views.quiz_update", "line_number": 45, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.quiz_delete", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.quiz_detail", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 49, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizQuestionCreateView.as_view", "line_number": 49, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizQuestionCreateView", "line_number": 49, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.remove_question_from_quiz", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.ChapterAutocomplete.as_view", "line_number": 53, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.ChapterAutocomplete", "line_number": 53, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectListView.as_view", "line_number": 55, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectListView", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 56, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectChapterCreate.as_view", "line_number": 56, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectChapterCreate", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 57, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectChapterUpdate.as_view", "line_number": 57, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectChapterUpdate", "line_number": 57, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 58, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectDeleteView.as_view", "line_number": 58, "usage_type": "call"}, {"api_name": "ayongampus.subject.views.SubjectDeleteView", "line_number": 58, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 60, "usage_type": "call"}, {"api_name": "ayongampus.quiz.multichoice.views.MCQuestionAnswerCreate.as_view", "line_number": 60, "usage_type": "call"}, {"api_name": "ayongampus.quiz.multichoice.views.MCQuestionAnswerCreate", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 61, "usage_type": "call"}, {"api_name": "ayongampus.quiz.multichoice.views.MCQuestionAnswerUpdate.as_view", "line_number": 61, "usage_type": "call"}, {"api_name": "ayongampus.quiz.multichoice.views.MCQuestionAnswerUpdate", "line_number": 61, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 64, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuestionListView.as_view", "line_number": 64, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuestionListView", "line_number": 64, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 65, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuestionDeleteView.as_view", "line_number": 65, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuestionDeleteView", "line_number": 65, 
"usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 66, "usage_type": "call"}, {"api_name": "ayongampus.quiz.truefalse.views.TFQuestionCreateView.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "ayongampus.quiz.truefalse.views.TFQuestionCreateView", "line_number": 66, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 67, "usage_type": "call"}, {"api_name": "ayongampus.quiz.truefalse.views.TFQuestionUpdateView.as_view", "line_number": 67, "usage_type": "call"}, {"api_name": "ayongampus.quiz.truefalse.views.TFQuestionUpdateView", "line_number": 67, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 70, "usage_type": "call"}, {"api_name": "ayongampus.quiz.essay.views.EssayQuestionCreateView.as_view", "line_number": 70, "usage_type": "call"}, {"api_name": "ayongampus.quiz.essay.views.EssayQuestionCreateView", "line_number": 70, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 71, "usage_type": "call"}, {"api_name": "ayongampus.quiz.essay.views.EssayQuestionUpdateView.as_view", "line_number": 71, "usage_type": "call"}, {"api_name": "ayongampus.quiz.essay.views.EssayQuestionUpdateView", "line_number": 71, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 73, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 73, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 75, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizMarkingList.as_view", "line_number": 75, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizMarkingList", "line_number": 75, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 76, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizMarkingDetail.as_view", "line_number": 76, "usage_type": "call"}, {"api_name": "ayongampus.quiz.views.QuizMarkingDetail", "line_number": 76, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 79, "usage_type": "call"}, {"api_name": "ayongampus.core.views.home", "line_number": 79, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 79, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 80, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.login", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 80, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.logout", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 82, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 83, "usage_type": "call"}, {"api_name": "ayongampus.authentication.views.signup", "line_number": 83, "usage_type": "attribute"}, {"api_name": "ayongampus.authentication.views", "line_number": 83, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 84, "usage_type": "call"}, {"api_name": "ayongampus.core.views.settings", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 84, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 85, "usage_type": "call"}, {"api_name": "ayongampus.core.views.picture", "line_number": 85, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 85, "usage_type": "name"}, {"api_name": 
"django.conf.urls.url", "line_number": 86, "usage_type": "call"}, {"api_name": "ayongampus.core.views.upload_picture", "line_number": 86, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 86, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 88, "usage_type": "call"}, {"api_name": "ayongampus.core.views.save_uploaded_picture", "line_number": 88, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 88, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 90, "usage_type": "call"}, {"api_name": "ayongampus.core.views.password", "line_number": 90, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 90, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 92, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 92, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 93, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 93, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 94, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 94, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 95, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 95, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 96, "usage_type": "call"}, {"api_name": "ayongampus.activities.views.notifications", "line_number": 96, "usage_type": "attribute"}, {"api_name": "ayongampus.activities.views", "line_number": 96, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 98, "usage_type": "call"}, {"api_name": "ayongampus.activities.views.last_notifications", "line_number": 98, "usage_type": "attribute"}, {"api_name": "ayongampus.activities.views", "line_number": 98, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 100, "usage_type": "call"}, {"api_name": "ayongampus.activities.views.check_notifications", "line_number": 100, "usage_type": "attribute"}, {"api_name": "ayongampus.activities.views", "line_number": 100, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 102, "usage_type": "call"}, {"api_name": "ayongampus.search.views.search", "line_number": 102, "usage_type": "attribute"}, {"api_name": "ayongampus.search.views", "line_number": 102, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 103, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 103, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 104, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 104, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 106, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 106, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 108, "usage_type": "call"}, {"api_name": "ayongampus.core.views.profile", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ayongampus.core.views", "line_number": 108, "usage_type": "name"}, {"api_name": "ayongampus.settings.DEBUG", "line_number": 111, "usage_type": "attribute"}, {"api_name": "ayongampus.settings", "line_number": 111, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 112, "usage_type": "call"}, {"api_name": "ayongampus.settings.MEDIA_URL", 
"line_number": 112, "usage_type": "attribute"}, {"api_name": "ayongampus.settings", "line_number": 112, "usage_type": "name"}, {"api_name": "ayongampus.settings.MEDIA_ROOT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "ayongampus.settings", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "544600382", "text": "import torch\r\nimport unicodedata\r\nfrom torchtext import data\r\nfrom torchtext import datasets\r\nimport time\r\nimport re\r\nimport spacy\r\nimport os\r\nimport _pickle as pickle\r\nfrom tqdm import tqdm\r\nfrom collections import Counter\r\nimport collections\r\nimport random\r\nrandom.seed(1234)\r\nimport mmap\r\nimport numpy as np\r\n\r\ndef get_num_lines(file_path):\r\n fp = open(file_path, \"r+\")\r\n buf = mmap.mmap(fp.fileno(), 0)\r\n lines = 0\r\n while buf.readline():\r\n lines += 1\r\n return lines\r\n\r\nSOS_WORD = ''\r\nEOS_WORD = ''\r\nPAD_WORD = ''\r\nUNK_WORD = ''\r\n\r\ndef dd():\r\n return defaultdict(int)\r\n\r\ndef dd3():\r\n return 3\r\n\r\ndef unicodeToAscii(s):\r\n return ''.join(\r\n c for c in unicodedata.normalize('NFD', s)\r\n if unicodedata.category(c) != 'Mn'\r\n )\r\n\r\ndef normalizeString(s):\r\n s = unicodeToAscii(s.lower().strip())\r\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\r\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\r\n return s\r\n\r\nclass Vocab():\r\n def __init__(self, max_vocab=10000):\r\n self.stoi = {}\r\n self.itos = []\r\n self.itos.append(SOS_WORD)\r\n self.itos.append(EOS_WORD)\r\n self.itos.append(PAD_WORD)\r\n self.itos.append(UNK_WORD)\r\n self.max_vocab = max_vocab\r\n\r\n\r\n def create_vocab_for_translation(self, all_translation_files_path):\r\n v = []\r\n f = open(all_translation_files_path, 'r')\r\n for lines in f:\r\n # lines = normalizeString(lines).split()\r\n lines = (lines).split()\r\n # print(lines)\r\n v.extend(lines)\r\n v = Counter(v).most_common(self.max_vocab)\r\n print('10 most common words: ' + str(v[:10]))\r\n v = [w[0] for w in v]\r\n self.itos.extend(v)\r\n self.stoi = collections.defaultdict(dd3, {v:k for k,v in enumerate(self.itos)})\r\n\r\n\r\n\r\nclass Preprocess():\r\n def __init__(self, train_src_file, train_tgt_file, val_src_file, val_tgt_file, vocab_generation_file, max_len=50):\r\n # self.vocab = vocab\r\n self.train_src_file = train_src_file\r\n self.train_tgt_file = train_tgt_file\r\n self.val_src_file = val_src_file\r\n self.val_tgt_file = val_tgt_file\r\n self.max_len = max_len\r\n\r\n self.lang2idx = {'en':0, 'fr':1, 'de':2, 'sos':3}\r\n\r\n self.vocab = Vocab()\r\n self.vocab.create_vocab_for_translation(vocab_generation_file)\r\n\r\n self.train_src = []\r\n self.train_tgt = []\r\n self.val_src = []\r\n self.val_tgt = []\r\n self.train_srclng = []\r\n self.train_tgtlng = []\r\n self.val_srclng = []\r\n self.val_tgtlng = []\r\n\r\n self.train_src_sentlen = []\r\n self.train_tgt_sentlen = []\r\n self.train_word_pair = []\r\n self.train_word_pair_y = []\r\n\r\n self.val_src_sentlen = []\r\n self.val_tgt_sentlen = []\r\n self.val_word_pair = []\r\n self.val_word_pair_y = []\r\n\r\n def preprocess_train(self):\r\n with open(self.train_src_file) as fsrc, open(self.train_tgt_file) as ftgt:\r\n for src_line, tgt_line in tqdm(zip(fsrc, ftgt), total=get_num_lines(self.train_src_file)):\r\n # src_line, tgt_line = normalizeString(src_line).split(), normalizeString(tgt_line).split()\r\n src_line, tgt_line = 
(src_line).split(), (tgt_line).split()\r\n # print(src_line);print(tgt_line);\r\n src_line = src_line\r\n tgt_line = tgt_line \r\n src_seq = [self.vocab.stoi[w] for w in src_line[1:]][:self.max_len]\r\n tgt_seq = [self.vocab.stoi[w] for w in [''] + tgt_line[1:]][:self.max_len]\r\n # print(src_seq);print(tgt_seq);\r\n # print(self.vocab.itos);break;\r\n\r\n\r\n if len(src_seq)<5 or len(tgt_seq)<5:\r\n continue\r\n\r\n self.train_src_sentlen.append((len(src_seq)-2)//15)\r\n self.train_tgt_sentlen.append((len(tgt_seq)-2)//15)\r\n\r\n x1 = range(1, len(src_seq)-2)\r\n # print(x1)\r\n src_words_idx = random.sample(x1, 2)\r\n\r\n\r\n if random.random() < 0.5:\r\n self.train_word_pair.append([src_seq[src_words_idx[1]], src_seq[src_words_idx[0]]])\r\n self.train_word_pair_y.append(0)\r\n else:\r\n self.train_word_pair.append([src_seq[src_words_idx[0]], src_seq[src_words_idx[1]]])\r\n self.train_word_pair_y.append(1)\r\n\r\n self.train_src.append(src_seq)\r\n self.train_tgt.append(tgt_seq)\r\n\r\n self.train_srclng.append(self.lang2idx[src_line[0]])\r\n self.train_tgtlng.append(self.lang2idx[tgt_line[0]])\r\n\r\n def preprocess_val(self):\r\n with open(self.val_src_file) as fsrc, open(self.val_tgt_file) as ftgt:\r\n for src_line, tgt_line in tqdm(zip(fsrc, ftgt), total=get_num_lines(self.val_src_file)):\r\n # src_line, tgt_line = normalizeString(src_line).split(), normalizeString(tgt_line).split()\r\n src_line, tgt_line = (src_line).split(), (tgt_line).split()\r\n src_line = src_line\r\n tgt_line = tgt_line\r\n\r\n src_seq = [self.vocab.stoi[w] for w in src_line[1:]][:self.max_len]\r\n tgt_seq = [self.vocab.stoi[w] for w in [''] + tgt_line[1:]][:self.max_len]\r\n\r\n if len(src_seq)<5 or len(tgt_seq)<5:\r\n continue\r\n self.val_src_sentlen.append((len(src_seq)-2)//15)\r\n self.val_tgt_sentlen.append((len(tgt_seq)-2)//15)\r\n x1 = range(1, len(src_seq)-2)\r\n src_words_idx = random.sample(x1, 2)\r\n\r\n\r\n if random.random() < 0.5:\r\n self.val_word_pair.append([src_seq[src_words_idx[1]], src_seq[src_words_idx[0]]])\r\n self.val_word_pair_y.append(0)\r\n else:\r\n self.val_word_pair.append([src_seq[src_words_idx[0]], src_seq[src_words_idx[1]]])\r\n self.val_word_pair_y.append(1)\r\n\r\n self.val_src.append(src_seq)\r\n self.val_tgt.append(tgt_seq)\r\n\r\n self.val_srclng.append(self.lang2idx[src_line[0]])\r\n self.val_tgtlng.append(self.lang2idx[tgt_line[0]])\r\n\r\n\r\n\r\n#\r\n# class MaxlenTranslationDataset(data.Dataset):\r\n# \t# Code modified from\r\n# \t# https://github.com/pytorch/text/blob/master/torchtext/datasets/translation.py\r\n# \t# to be able to control the max length of the source and target sentences\r\n#\r\n# def __init__(self, path, exts, fields, max_len=None, **kwargs):\r\n#\r\n# if not isinstance(fields[0], (tuple, list)):\r\n# fields = [('src', fields[0]), ('trg', fields[1])]\r\n#\r\n# src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\r\n#\r\n# examples = []\r\n# with open(src_path) as src_file, open(trg_path) as trg_file:\r\n# for src_line, trg_line in tqdm(zip(src_file, trg_file)):\r\n# src_line, trg_line = src_line.split(' '), trg_line.split(' ')\r\n# if max_len is not None:\r\n# \tsrc_line = src_line[:max_len]\r\n# src_line = src_line + exts[0].split('.')[1]\r\n# \tsrc_line = str(' '.join(src_line))\r\n# \ttrg_line = trg_line[:max_len]\r\n# trg_line = trg_line.insert(exts[1].split('.')[1], 0)\r\n# \ttrg_line = str(' '.join(trg_line))\r\n#\r\n# if src_line != '' and trg_line != '':\r\n# examples.append(data.Example.fromlist(\r\n# [src_line, trg_line], 
fields))\r\n#\r\n# super(MaxlenTranslationDataset, self).__init__(examples, fields, **kwargs)\r\n#\r\n#\r\n# class DataPreprocessor(object):\r\n# \tdef __init__(self):\r\n# \t\tself.src_field, self.trg_field = self.generate_fields()\r\n#\r\n# \tdef preprocess(self, train_path, val_path, train_file, val_file, src_lang, trg_lang, max_len=None):\r\n# \t\t# Generating torchtext dataset class\r\n# \t\tprint (\"Preprocessing vocab dataset...\")\r\n# \t\ttrain_dataset = self.generate_data(train_path, src_lang, trg_lang, max_len)\r\n#\r\n# \t\tprint (\"Saving train dataset...\")\r\n# \t\tself.save_data(train_file, train_dataset)\r\n#\r\n# \t\tprint (\"Preprocessing validation dataset...\")\r\n# \t\tval_dataset = self.generate_data(val_path, src_lang, trg_lang, max_len)\r\n#\r\n# \t\tprint (\"Saving validation dataset...\")\r\n# \t\tself.save_data(val_file, val_dataset)\r\n#\r\n# \t\t# Building field vocabulary\r\n# \t\tself.src_field.build_vocab(train_dataset, max_size=30000)\r\n# \t\tself.trg_field.build_vocab(train_dataset, max_size=30000)\r\n#\r\n# \t\tsrc_vocab, trg_vocab, src_inv_vocab, trg_inv_vocab = self.generate_vocabs()\r\n#\r\n# \t\tvocabs = {'src_vocab': src_vocab, 'trg_vocab':trg_vocab,\r\n# \t\t\t 'src_inv_vocab':src_inv_vocab, 'trg_inv_vocab':trg_inv_vocab}\r\n#\r\n# \t\treturn train_dataset, val_dataset, vocabs\r\n#\r\n# \tdef load_data(self, vocab_generation_file, train_file, val_file):\r\n#\r\n# \t\t# Loading saved data\r\n# vocab_dataset = torch.load(vocab_generation_file)\r\n# \t\tvocab_examples = vocab_dataset['examples']\r\n#\r\n# train_dataset = torch.load(train_file)\r\n# \t\ttrain_examples = train_dataset['examples']\r\n#\r\n# \t\tval_dataset = torch.load(val_file)\r\n# \t\tval_examples = val_dataset['examples']\r\n#\r\n# \t\t# Generating torchtext dataset class\r\n# \t\tfields = [('src', self.src_field), ('trg', self.trg_field)]\r\n#\r\n# vocab_dataset = data.Dataset(fields=fields, examples=train_examples)\r\n# train_dataset = data.Dataset(fields=fields, examples=train_examples)\r\n# \t\tval_dataset = data.Dataset(fields=fields, examples=val_examples)\r\n#\r\n# \t\t# Building field vocabulary\r\n# \t\tself.src_field.build_vocab(train_dataset, max_size=30000)\r\n# \t\tself.trg_field.build_vocab(train_dataset, max_size=30000)\r\n#\r\n# \t\tsrc_vocab, trg_vocab, src_inv_vocab, trg_inv_vocab = self.generate_vocabs()\r\n# \t\tvocabs = {'src_vocab': src_vocab, 'trg_vocab':trg_vocab,\r\n# \t\t\t 'src_inv_vocab':src_inv_vocab, 'trg_inv_vocab':trg_inv_vocab}\r\n#\r\n# \t\treturn train_dataset, val_dataset, vocabs\r\n#\r\n#\r\n# \tdef save_data(self, data_file, dataset):\r\n#\r\n# \t\texamples = vars(dataset)['examples']\r\n# \t\tdataset = {'examples': examples}\r\n#\r\n# \t\ttorch.save(dataset, data_file)\r\n#\r\n# \tdef generate_fields(self):\r\n# \t src_field = data.Field(tokenize=data.get_tokenizer('spacy'),\r\n# \t eos_token=EOS_WORD,\r\n# \t pad_token=PAD_WORD,\r\n# \t include_lengths=True,\r\n# \t batch_first=True)\r\n#\r\n# \t trg_field = data.Field(tokenize=data.get_tokenizer('spacy'),\r\n# \t eos_token=EOS_WORD,\r\n# \t pad_token=PAD_WORD,\r\n# \t include_lengths=True,\r\n# \t batch_first=True)\r\n#\r\n# \t return src_field, trg_field\r\n#\r\n# \tdef generate_data(self, data_path, src_lang, trg_lang, max_len=None):\r\n# \t exts = ('.'+src_lang, '.'+trg_lang)\r\n#\r\n# \t dataset = MaxlenTranslationDataset(\r\n# \t path=data_path,\r\n# \t exts=(exts),\r\n# \t fields=(self.src_field, self.trg_field),\r\n# \t max_len=max_len)\r\n#\r\n# \t return dataset\r\n#\r\n# \tdef 
generate_vocabs(self):\r\n# \t # Define string to index vocabs\r\n# \t src_vocab = self.src_field.vocab.stoi\r\n# \t trg_vocab = self.trg_field.vocab.stoi\r\n#\r\n# \t # Define index to string vocabs\r\n# \t src_inv_vocab = self.src_field.vocab.itos\r\n# \t trg_inv_vocab = self.trg_field.vocab.itos\r\n#\r\n# \t return src_vocab, trg_vocab, src_inv_vocab, trg_inv_vocab\r\n", "sub_path": "data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 11557, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "random.seed", "line_number": 14, "usage_type": "call"}, {"api_name": "mmap.mmap", "line_number": 20, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "unicodedata.category", "line_number": 40, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 46, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 72, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 111, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 131, "usage_type": "call"}, {"api_name": "random.random", "line_number": 134, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 149, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 163, "usage_type": "call"}, {"api_name": "random.random", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "73776054", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport readchar\nimport sys\nimport time\n\nimport controller as controller\nfrom reset_arm import reset_arm\nfrom test_arm import test_arm\n\n\nsys.dont_write_bytecode = True\n\n\ndef move_as(base, shoulder, elbow, wrist, gripper, char):\n speed = 100\n\n if char == 's':\n base.set_position_res(base.position - 1, speed)\n print (base.position)\n elif char == 'f':\n base.set_position_res(base.position + 1, speed)\n print (base.position)\n elif char == 'e':\n shoulder.set_position_res(shoulder.position + 1, speed)\n print (shoulder.position)\n elif char == 'd':\n shoulder.set_position_res(shoulder.position - 1, speed)\n print (shoulder.position)\n elif char == 'g':\n elbow.set_position_res(elbow.position - 1, speed)\n print (elbow.position)\n elif char == 't':\n elbow.set_position_res(elbow.position + 1, speed)\n print (elbow.position)\n elif char == 'u':\n wrist.set_position(wrist.position + 1, speed)\n print (wrist.position)\n elif char == 'i':\n wrist.set_position(wrist.position - 1, speed)\n print (wrist.position)\n elif char == 'j':\n gripper.set_position(gripper.position + 3, speed)\n print (gripper.position)\n elif char == 'k':\n gripper.set_position(gripper.position - 3, speed)\n print (gripper.position)\n elif char == ' ':\n if gripper.position == 0:\n gripper.set_position(100, speed)\n else:\n gripper.set_position(0, speed)\n else:\n pass\n\n # time.sleep(0.5)\n\ndef main():\n arm = controller.Arm()\n arm.reset()\n\n path = []\n while True:\n # for char in path:\n char = readchar.readchar()\n hex_char = hex(ord(char))\n\n if hex_char == '0x3':\n print(\"path followed: {}\".format(path))\n\n print(\"received {} in hex, exiting\".format(hex_char))\n\n sys.exit()\n\n print(char)\n\n path.append(char)\n\n 
move_as(arm.base, arm.shoulder, arm.elbow, arm.wrist, arm.gripper, char)\n\n        if char == 'r':\n            reset_arm(arm.base, arm.shoulder, arm.elbow, arm.wrist, arm.gripper)\n\n            for char in path:\n                move_as(arm.base, arm.shoulder, arm.elbow, arm.wrist, arm.gripper, char)\n\n            path = []\n        elif char == 'v':\n            # filename = 'path_' + str(datetime.datetime.now()).replace(' ', '_')[:-3] + '.txt'\n            filename = 'path.txt'\n\n            with open(filename, 'w') as f:\n                f.write(''.join(path)[:-1])\n\n        elif char == 'c':\n            filename = 'path.txt'\n\n            with open(filename, 'r') as f:\n                path = f.readlines()\n                print(path)\n\n            reset_arm(arm.base, arm.shoulder, arm.elbow, arm.wrist, arm.gripper)\n\n            for char in path[0]:\n                move_as(arm.base, arm.shoulder, arm.elbow, arm.wrist, arm.gripper, char)\n\n            path = []\n\n\nif __name__ == '__main__':\n    main()\n\n", "sub_path": "Robot/manual_control.py", "file_name": "manual_control.py", "file_ext": "py", "file_size_in_byte": 3171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.dont_write_bytecode", "line_number": 19, "usage_type": "attribute"}, {"api_name": "controller.Arm", "line_number": 66, "usage_type": "call"}, {"api_name": "readchar.readchar", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "reset_arm.reset_arm", "line_number": 89, "usage_type": "call"}, {"api_name": "reset_arm.reset_arm", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "93432019", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nurl = 'http://suninjuly.github.io/cats.html'\nbrowser = webdriver.Chrome()\n\ntry:\n    browser.get(url)\n    browser.implicitly_wait(5)\n    browser.find_element_by_id(\"button\")\nfinally:\n    browser.close()\n", "sub_path": "lesson10/lesson10_step6.py", "file_name": "lesson10_step6.py", "file_ext": "py", "file_size_in_byte": 278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "485336401", "text": "import argparse\n\n\ndef add_common_args(parser: argparse.ArgumentParser):\n    parser.add_argument(\n        '--dry-run',\n        action='store_true',\n        default=False,\n        help='If true, will not actually do any changes to i3 workspaces.')\n    parser.add_argument(\n        '--log-level',\n        choices=('debug', 'info', 'warning', 'error', 'critical'),\n        default='warning',\n        help='Logging level for stderr and syslog.')\n\n\ndef add_workspace_naming_args(parser: argparse.ArgumentParser) -> None:\n    parser.add_argument(\n        '--window-icons-all-groups',\n        action='store_true',\n        default=False,\n        help='If true, will add the icons of the open windows to workspaces'\n        ' in all groups, and not just the active group. 
Also implies '\n '--window-icons.')\n parser.add_argument(\n '--renumber-workspaces',\n action='store_true',\n default=False,\n help='If true, will renumber workspaces in every groups so that they '\n 'are in numerical order, similar to tmux\\'s renumber-windows option.')\n", "sub_path": "i3wsgroups/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "218731387", "text": "import openpyxl\r\nimport os\r\n\r\n\r\nclass GetDataFromExcel:\r\n \r\n def __init__(self, filename=os.path.join(os.getcwd(), \"configure\", \"configure_stocks.xlsx\"),\r\n sheet_name_1=\"Credentials\", sheet_name_2=\"stocks_list\"):\r\n self.filename = filename\r\n self.sheet_name_1 = sheet_name_1\r\n self.sheet_name_2 = sheet_name_2\r\n \r\n try:\r\n self.wb = openpyxl.load_workbook(self.filename, read_only=True)\r\n self.sheet_1 = self.wb[self.sheet_name_1]\r\n self.sheet_2 = self.wb[self.sheet_name_2]\r\n except Exception as e:\r\n print(\"Excel file is missing\",str(e))\r\n raise\r\n \r\n def get_credentials(self, api_key_cell='B2', api_secret_cell='B3', access_token_cell='B4'):\r\n api_key = self.sheet_1[api_key_cell].value\r\n api_secret = self.sheet_1[api_secret_cell].value\r\n access_token = self.sheet_1[access_token_cell].value\r\n \r\n return api_key.replace(\" \", \"\"), api_secret.replace(\" \", \"\"), access_token.replace(\" \", \"\")\r\n \r\n def get_other_config_values(self, cells=[]): #list\r\n \r\n fetched_values = []\r\n for _value in cells:\r\n fetched_values.append(self.sheet_1[_value].value)\r\n \r\n return fetched_values\r\n \r\n def get_stock_list(self):\r\n \r\n stock_list = []\r\n for row in self.sheet_2.iter_rows(row_offset=0):\r\n if row[0].value != \"\" and row[0].value is not None:\r\n data = row[0].value\r\n stock_list.append(data.replace(\" \", \"\"))\r\n else:\r\n pass\r\n \r\n return stock_list\r\n\r\n\r\n", "sub_path": "algo/other_files/GetDataFromExcel.py", "file_name": "GetDataFromExcel.py", "file_ext": "py", "file_size_in_byte": 1680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "266816489", "text": "##################################################\n#\n# origin: https://github.com/Sadaival/Hand-Gestures\n#\n##################################################\nimport traceback\nimport cv2\nimport numpy as np\nimport math\n\n\ncap = cv2.VideoCapture(-1)\n\n# Reduce the size of video to 320x240 so rpi can process faster\ncap.set(3,320)\ncap.set(4,240)\n\n# Variable for finger count\nfinger = 0 # Number for stop state\n \nwhile(1):\n \n try:\n _, frame = cap.read()\n #frame=cv2.flip(frame,1)\n kernel = np.ones((4,4),np.uint8)\n \n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) \n \n # Glove HSV range\n lower_hand = (110,109,12)\n upper_hand = (120,255,84)\n \n # Get only glove color\n mask = cv2.inRange(hsv, lower_hand, upper_hand)\n \n # Noise filtering\n mask = cv2.GaussianBlur(mask,(5,5),0)\n \n # Erosion and 
dilation (need to be adjusted)\n mask = cv2.erode(mask,kernel,iterations = 1)\n mask = cv2.dilate(mask,kernel,iterations = 2)\n \n # Noise filtering\n mask = cv2.GaussianBlur(mask,(5,5),0) \n \n # Find contour\n contours,hierarchy= cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n \n # Get max area in detected contour (which is the hand)\n area_max = 0\n contour_index = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area_temp = cv2.contourArea(cnt)\n if area_temp > area_max:\n area_max = area_temp\n contour_index = i\n cnt = contours[contour_index]\n \n # Create convex hull of hand\n hull = cv2.convexHull(cnt)\n \n # Smooth and approximate the contour\n # http://creat-tabu.blogspot.com/2013/08/opencv-python-hand-gesture-recognition.html\n cnt_ap= cv2.approxPolyDP(cnt,0.0005*cv2.arcLength(cnt,True),True)\n\n # Calculate contour and hull area\n # To compare finger digit 0 or 1\n area_cnt = cv2.contourArea(cnt)\n area_hull = cv2.contourArea(hull)\n \n # Recalculate hull and calculate defects\n # Need returnPoints to be False\n hull = cv2.convexHull(cnt_ap, returnPoints=False)\n defects = cv2.convexityDefects(cnt_ap, hull)\n \n defects_count=0\n # Loop to find number of defects\n for i in range(defects.shape[0]):\n s,e,f,d = defects[i,0]\n start = tuple(cnt_ap[s][0])\n end = tuple(cnt_ap[e][0])\n far = tuple(cnt_ap[f][0])\n \n # Triangle lengths\n a = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)\n b = math.sqrt((far[0] - end[0])**2 + (far[1] - end[1])**2)\n c = math.sqrt((start[0] - end[0])**2 + (start[1] - end[1])**2)\n\n # Distance between point and convex hull\n # Ideally not needed\n # s = (a+b+c)/2\n # ar = math.sqrt(s*(s-a)*(s-b)*(s-c))\n # d=(2*ar)/a\n\n # Calculate angle using cosine rule\n angle = math.acos((a**2 + b**2 - c**2) / (2 * a * b)) * 57.295\n \n # If angle <90, it is a defect caused by finger raised\n if angle < 90:\n defects_count += 1\n \n # Finger print\n # 0 defect = finger 0 or 1\n if defects_count == 0:\n # If hull is 10% bigger than contour => finger 1\n if (area_hull-area_cnt)/(area_cnt) > .1: \n finger = 1\n else:\n finger = 0\n \n # 1 defect => finger 2\n elif defects_count == 1:\n finger = 2\n \n # 2 defects => finger 3\n elif defects_count == 2 :\n finger = 3\n \n # 3 defects => finger 4\n elif defects_count == 3:\n finger = 4\n \n # 4 defects => finger 5\n elif defects_count == 4:\n finger = 5\n\n #show the windows\n cv2.imshow('mask',mask)\n cv2.imshow('frame',frame)\n print(finger)\n \n except Exception:\n pass\n \n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n \ncv2.destroyAllWindows()\ncap.release() \n", "sub_path": "Code/tests/final_hand_detect.py", "file_name": "final_hand_detect.py", "file_ext": "py", "file_size_in_byte": 4278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.VideoCapture", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", 
"line_number": 45, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.approxPolyDP", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.arcLength", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.convexityDefects", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 88, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 89, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 131, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "478327085", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport functools\nfrom data import Data\nfrom evaluation import *\nfrom params import FLAGS\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Layer, TimeDistributed, LSTM, GRU, Reshape, Conv1D, Concatenate, Dense, Embedding, Lambda, Flatten, BatchNormalization, subtract\n\n\n# from keras.backend.tensorflow_backend import set_session\n\n# from keras.callbacks import CSVLogger, Callback\n\n# import matplotlib\n# matplotlib.use('agg')\n\n\"\"\"\n* Filename: aaai19tensorflow2.py (Tensorflow v2.0)\n* Implemented by Sundong Kim (sundong.kim@kaist.ac.kr) \n* Summary: Implementation of the State-of-the-art method DRSA [AAAI'19]. \n This performance of this baseline should be comparable to our method on first-time visitor testing set, \n whereas the performance on train_censored test set should be worse than our method. \n We implemented it again from the scratch, to guarantee same training input and same evaluation technique. \n* Reference: Deep Recurrent Survival Analysis [AAAI'19] by Kan Ren et al. \n* Dependency: \n * data.py (for some data preprocessing)\n * evaluation.py (for evaluation)\n * params.py (for parameters)\n * Data directory: ../data/indoor/ or ../data_samples/indoor/ depending on the parameter FLAGS.all_data\n* HowTo: This script can be executed independently or via main.py.\n\n* ToDo: LSTM fix, Test evaluation add, Remove dependency for independent open-source. 
\n* Issues: \n\"\"\"\n\nclass AAAI19Data(Data):\n \"\"\"Data for AAAI19\"\"\"\n def __init__(self, store_id):\n super(AAAI19Data, self).__init__(store_id)\n\n def train_data_generator_AAAI19(self):\n \"\"\" Train data generator for AAAI'19 DRSA.\n Consider: Using each visit separately for training, histories are not considered.\n \"\"\"\n def __gen__():\n while True:\n # Only retain the last visits which includes all previous visits (Retain the last visit for each customer)\n idxs = list(self.df_train.visit_id)\n # np.random.shuffle(idxs)\n df_train = self.df_train.set_index('visit_id')\n train_visits = self.train_visits.set_index('visit_id')\n for idx in idxs:\n visit = train_visits.loc[idx]\n label = df_train.loc[idx]\n yield visit['visit_indices'], visit['area_indices'], \\\n [visit[ft] for ft in self.handcrafted_features], \\\n [label[ft] for ft in ['revisit_intention', 'suppress_time']]\n\n gen = __gen__()\n\n while True:\n batch = [np.stack(x) for x in zip(*(next(gen) for _ in range(FLAGS.batch_size)))]\n self.moke_data_train = np.hstack((batch[0].reshape(-1, 1), batch[1], batch[2])), batch[-1]\n yield self.moke_data_train\n\n def test_data_generator_AAAI19(self):\n \"\"\" Train data generator for AAAI'19 DRSA.\n Similar to train_data_generator_AAAI19()\n \"\"\"\n def __gen__():\n while True:\n idxs = list(self.df_test.visit_id)\n df_all = pd.concat([self.df_train, self.df_test]).set_index('visit_id')\n visits = self.visits.set_index('visit_id')\n for idx in idxs:\n visit = visits.loc[idx]\n label = df_all.loc[idx]\n yield visit['visit_indices'], visit['area_indices'], \\\n [visit[ft] for ft in self.handcrafted_features], \\\n [label[ft] for ft in ['revisit_intention', 'suppress_time']]\n\n gen = __gen__()\n\n while True:\n batch = [np.stack(x) for x in zip(*(next(gen) for _ in range(len(self.test_visits))))]\n self.moke_data_test = np.hstack((batch[0].reshape(-1, 1), batch[1], batch[2])), batch[-1]\n yield self.moke_data_test\n\n def train_censored_data_generator_AAAI19(self):\n \"\"\" Train_censored data generator for AAAI'19 DRSA.\n Similar to train_data_generator_AAAI19()\n \"\"\"\n def __gen__():\n while True:\n idxs = list(self.df_train_censored.visit_id)\n df_train = self.df_train.set_index('visit_id')\n train_visits = self.train_visits.set_index('visit_id')\n for idx in idxs:\n visit = train_visits.loc[idx]\n label = df_train.loc[idx]\n yield visit['visit_indices'], visit['area_indices'], \\\n [visit[ft] for ft in self.handcrafted_features], \\\n [label[ft] for ft in ['revisit_intention', 'suppress_time']]\n gen = __gen__()\n\n while True:\n batch = [np.stack(x) for x in zip(*(next(gen) for _ in range(len(self.censored_visit_id))))]\n self.moke_data_train_censored = np.hstack((batch[0].reshape(-1, 1), batch[1], batch[2])), batch[-1]\n yield self.moke_data_train_censored\n\nclass AAAI19Model(Model):\n def __init__(self, data):\n super(AAAI19Model, self).__init__()\n self.d1 = Dense(40, activation='softmax')\n self.data = data\n self.max_num_areas = np.max(self.data.train_visits.areas.apply(len))\n\n def call(self, single_input):\n user_input = Lambda(lambda x: x[:, 0:1])(single_input)\n area_input = Lambda(lambda x: x[:, 1:-len(self.data.handcrafted_features)])(single_input)\n visit_features_input = Lambda(lambda x: x[:, -len(self.data.handcrafted_features):])(single_input)\n\n # Define some embedding layers\n user_embedding_layer = Embedding(\n input_dim=len(self.data.visit_embedding),\n output_dim=FLAGS.embedding_dim,\n 
weights=[np.array(list(self.data.visit_embedding.values()))],\n input_length=1,\n trainable=False)\n\n area_embedding_layer = Embedding(\n input_dim=len(self.data.area_embedding),\n output_dim=FLAGS.embedding_dim,\n weights=[np.array(list(self.data.area_embedding.values()))],\n input_length=self.max_num_areas,\n trainable=False)\n\n user_input = user_embedding_layer(user_input)\n area_input = area_embedding_layer(area_input) # Dimension becomes too large?\n\n user_input = Reshape((-1,))(user_input)\n area_input = Reshape((-1,))(area_input)\n\n concat = Concatenate()([user_input, area_input, visit_features_input]) # [u;a;v]\n concat = Dense(128, activation='relu')(concat)\n concat = BatchNormalization()(concat)\n\n expinp = Lambda(lambda x: K.repeat(x, 365))(concat)\n\n # Add time from 1-365\n ones = K.ones_like(expinp[:, :1, :1])\n yseq = K.variable(np.expand_dims(np.array(range(365)) + 0.5, 0))\n yseq = K.dot(ones, yseq)\n yseqd = Lambda(lambda y: K.permute_dimensions(y, (0, 2, 1)))(yseq)\n expinp = Concatenate()([expinp, yseqd])\n # !!!!!!\n # all_areas_lstm = tf.compat.v1.nn.static_rnn(tf.compat.v1.nn.rnn_cell.GRUCell(64), inputs=expinp)\n all_areas_lstm = LSTM(64, return_sequences=True)(expinp)\n # print(\"!!!!!\", expinp)\n logits = TimeDistributed(Dense(1, activation='sigmoid'))(all_areas_lstm)\n logits = Lambda(lambda x: K.squeeze(x, axis=-1))(logits)\n return logits\n\nclass AAAI19():\n def __init__(self, store_id, GPU_id):\n self.store_id = store_id\n self.GPU_id = GPU_id\n self.data = None\n self.train_data = None\n self.test_data = None\n self.train_censored_data = None\n self.d_interval = {'date': {'left': -0.5, 'right': 0.5},\n 'week': {'left': -3.5, 'right': 3.5},\n 'month': {'left': -15, 'right': 15},\n 'season': {'left': -45, 'right': 45}}\n self.tmp_tensor = None\n self.probs_survive = None\n\n def setup(self):\n # # If uncomment, do not use GPU.\n # os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = self.GPU_id\n # print('GPU Available', tf.test.is_gpu_available())\n config = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n config.gpu_options.per_process_gpu_memory_fraction = 0.9\n config.gpu_options.allow_growth = True\n sess = tf.compat.v1.Session(config=config)\n K.set_session(sess)\n\n def train_test(self):\n \"\"\"Using training/testing set & generated model, do learning & prediction\"\"\"\n\n # Data generation\n self.data = AAAI19Data(self.store_id)\n self.data.run()\n self.nfeat = len(self.data.handcrafted_features)\n\n print('Number of areas: {}'.format(len(self.data.area_embedding)))\n\n train_data_size = len(self.data.train_visits)\n test_data_size = len(self.data.test_visits)\n train_censored_data_size = len(self.data.train_censored_visits)\n print(train_data_size, test_data_size, train_censored_data_size)\n\n self.train_data = self.data.train_data_generator_AAAI19()\n self.test_data = self.data.test_data_generator_AAAI19()\n self.train_censored_data = self.data.train_censored_data_generator_AAAI19()\n\n # Generate a model\n self.model = AAAI19Model(data=self.data)\n optimizer = tf.keras.optimizers.Adam()\n\n def calculate_proba(x, interval):\n \"\"\" For calculating negative log likelihood losses for censored data.\"\"\"\n rvbin_label = x[-2] # revisit binary label\n supp_time = x[-1] # revisit suppress time # supp_time = K.cast(K.round(supp_time), dtype='int32')\n kvar_ones = K.ones_like(x[:-2])\n y = subtract([kvar_ones, x[:-2]]) # y = non-revisit rate (1-hazard rate)\n\n left_bin = K.maximum(supp_time + 
interval['left'], K.ones_like(\n supp_time)) # reason: y[0:x] cannot be calculated when x < 1, therefore set x as 1 so that y[0:1] = 1\n right_bin = K.minimum(supp_time + interval['right'], K.ones_like(\n supp_time) * 365) # reason: y[0:x] cannot be calculated when x > 365\n\n left_bin = K.cast(K.round(left_bin), dtype='int32')\n right_bin = K.cast(K.round(right_bin), dtype='int32')\n supp_time_int = K.cast(K.round(supp_time), dtype='int32')\n\n p_survive_until_linterval = K.prod(y[0:left_bin]) # The instance has to survive for every time step until t\n p_survive_until_rinterval = K.prod(y[0:right_bin])\n p_survive_until_supp_time = K.prod(y[0:supp_time_int])\n\n result = K.stack(\n [p_survive_until_linterval, p_survive_until_rinterval, p_survive_until_supp_time, rvbin_label])\n return result\n\n def precal(label, input):\n uc_loss_nll_option = 'date'\n self.tmp_tensor = K.concatenate([input, label], axis=-1)\n self.probs_survive = K.map_fn(\n functools.partial(calculate_proba, interval=self.d_interval[uc_loss_nll_option]),\n elems=self.tmp_tensor)\n\n def uc_c_loss_ce(label):\n \"\"\"Cross Entropy loss for both cases--censored and uncensored\"\"\"\n final_survive_prob = tf.transpose(self.probs_survive)[2]\n final_revisit_prob = tf.subtract(tf.constant(1.0, dtype=tf.float32), final_survive_prob)\n survive_revisit_prob = tf.transpose(tf.stack([final_survive_prob, final_revisit_prob]), name=\"predict\")\n actual_survive_bin = tf.subtract(tf.constant(1.0, dtype=tf.float32), label[:, -2])\n actual_revisit_bin = label[:, -2]\n revisit_binary_categorical = tf.transpose(tf.stack([actual_survive_bin, actual_revisit_bin]))\n result = -tf.reduce_sum(\n revisit_binary_categorical * tf.math.log(tf.clip_by_value(survive_revisit_prob, 1e-10, 1.0)))\n return result\n\n def uc_loss_nll():\n \"\"\"Negative log-likelihood loss\"\"\"\n prob_revisit_at_z = tf.transpose(self.probs_survive)[0] - tf.transpose(self.probs_survive)[1]\n # If censored -> multiply by 0 -> thus ignored\n prob_revisit_at_z_uncensored = tf.add(tf.multiply(prob_revisit_at_z, tf.transpose(self.probs_survive)[-1]),\n 1e-20)\n result = -tf.reduce_sum(K.log(prob_revisit_at_z_uncensored))\n return result\n\n train_ce_loss = tf.keras.metrics.Mean(name='train_ce_loss')\n train_nll_loss = tf.keras.metrics.Mean(name='train_nll_loss')\n test_ce_loss = tf.keras.metrics.Mean(name='test_ce_loss')\n test_nll_loss = tf.keras.metrics.Mean(name='test_nll_loss')\n train_censored_ce_loss = tf.keras.metrics.Mean(name='train_censored_ce_loss')\n train_censored_nll_loss = tf.keras.metrics.Mean(name='train_censored_nll_loss')\n\n if FLAGS.all_data: # Check more often since the whole data is much big\n steps_per_epoch = train_data_size // (10 * FLAGS.batch_size)\n else:\n steps_per_epoch = train_data_size // FLAGS.batch_size\n\n def train_step(input, label):\n with tf.GradientTape() as tape:\n predictions = self.model(input)\n precal(label, predictions)\n # print(label, input, self.tmp_tensor) # Easy Debugging by Default Eager Execution!\n ce_loss = uc_c_loss_ce(label)\n nll_loss = uc_loss_nll()\n gradients = tape.gradient(ce_loss+0.2*nll_loss, self.model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n train_ce_loss(ce_loss)\n train_nll_loss(nll_loss)\n\n def test_step(input, label):\n predictions = self.model(input)\n precal(label, predictions)\n ce_loss = uc_c_loss_ce(label)\n nll_loss = uc_loss_nll()\n test_ce_loss(ce_loss)\n test_nll_loss(nll_loss)\n\n def train_censored_step(input, label):\n predictions = 
self.model(input)\n precal(label, predictions)\n ce_loss = uc_c_loss_ce(label)\n nll_loss = uc_loss_nll()\n train_censored_ce_loss(ce_loss)\n train_censored_nll_loss(nll_loss)\n\n for epoch in range(FLAGS.train_epochs):\n step = 0\n onGoing = True\n while onGoing:\n step+=1\n if step < steps_per_epoch:\n onGoing = True\n inputs, labels = next(self.train_data)\n inputs = tf.cast(inputs, tf.float32)\n labels = tf.cast(labels, tf.float32)\n train_step(inputs, labels)\n else:\n onGoing = False\n\n test_inputs, test_labels = next(self.test_data)\n test_inputs = tf.cast(test_inputs, tf.float32)\n test_labels = tf.cast(test_labels, tf.float32)\n test_step(test_inputs, test_labels)\n\n train_censored_inputs, train_censored_labels = next(self.train_censored_data)\n train_censored_inputs = tf.cast(train_censored_inputs, tf.float32)\n train_censored_labels = tf.cast(train_censored_labels, tf.float32)\n train_censored_step(train_censored_inputs, train_censored_labels)\n\n template = 'Epoch {}, Train-CE-Loss: {:4f}, Train-NLL-Loss: {:4f}, Test-CE-Loss: {:4f}, Test-NLL-Loss: {:4f}, Train-censored-CE-Loss: {:4f}, Train-censored-NLL-Loss: {:4f}'\n print(template.format(epoch + 1,\n train_ce_loss.result(),\n train_nll_loss.result(),\n test_ce_loss.result(),\n test_nll_loss.result(),\n train_censored_ce_loss.result(),\n train_censored_nll_loss.result(),\n ))\n\n test_inputs, test_labels = next(self.test_data)\n test_inputs = tf.cast(test_inputs, tf.float32)\n test_labels = tf.cast(test_labels, tf.float32)\n pred_test = self.model(test_inputs)\n # print(K.sum(pred_test, axis=0))\n\n train_censored_inputs, train_censored_labels = next(self.train_censored_data)\n train_censored_inputs = tf.cast(train_censored_inputs, tf.float32)\n train_censored_labels = tf.cast(train_censored_labels, tf.float32)\n pred_train_censored = self.model(train_censored_inputs)\n # print(K.sum(pred_train_censored, axis=0))\n\n eval = Evaluation()\n eval.evaluate(self.data, pred_test)\n eval.evaluate_train_censored(self.data, pred_train_censored)\n print(\"The results of AAAI'19 model are listed as \\\"Our Model\\\" from the above log.\")\n\n def run(self):\n self.setup()\n self.train_test()\n\n\n\nif __name__ == \"__main__\":\n print(\"-----------------------------------------\")\n print(\" Running AAAI'19 code directly \")\n print(\"-----------------------------------------\")\n # gpu_id = input(\"Choose one GPU slot to run (ex. 
0, 1, 2, 3, 4, 5, 6, 7 for DGX server)\")\n gpu_id = \"0\"\n aaai19 = AAAI19(store_id=\"store_C\", GPU_id=gpu_id)\n # aaai19 = AAAI19(store_id=FLAGS.store_id, GPU_id=gpu_id)\n aaai19.run()", "sub_path": "survival-revisit-code/keras/aaai19tensorflow2.py", "file_name": "aaai19tensorflow2.py", "file_ext": "py", "file_size_in_byte": 17402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "data.Data", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 66, "usage_type": "call"}, {"api_name": "params.FLAGS.batch_size", "line_number": 66, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras.Model", "line_number": 115, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 128, "usage_type": "call"}, {"api_name": "params.FLAGS.embedding_dim", "line_number": 130, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 135, "usage_type": "call"}, {"api_name": "params.FLAGS.embedding_dim", "line_number": 137, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Concatenate", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.repeat", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 152, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.ones_like", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 155, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.variable", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, 
"usage_type": "call"}, {"api_name": "tensorflow.keras.backend.dot", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 157, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.permute_dimensions", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 158, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Concatenate", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.TimeDistributed", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.squeeze", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 165, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.ConfigProto", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 191, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.set_session", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 192, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.ones_like", "line_number": 221, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 221, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.subtract", "line_number": 222, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.maximum", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 224, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.ones_like", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.minimum", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 226, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.ones_like", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.cast", "line_number": 229, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 229, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 229, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.cast", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 230, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.cast", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 231, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.round", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.prod", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 233, 
"usage_type": "name"}, {"api_name": "tensorflow.keras.backend.prod", "line_number": 234, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 234, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.prod", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 235, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.stack", "line_number": 237, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 237, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.concatenate", "line_number": 243, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 243, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.map_fn", "line_number": 244, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 244, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 245, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 251, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 253, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.math.log", "line_number": 257, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tensorflow.clip_by_value", "line_number": 257, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.log", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 266, "usage_type": "name"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 269, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 271, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 272, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 272, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.keras", 
"line_number": 273, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.Mean", "line_number": 274, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 274, "usage_type": "attribute"}, {"api_name": "params.FLAGS.all_data", "line_number": 276, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 276, "usage_type": "name"}, {"api_name": "params.FLAGS.batch_size", "line_number": 277, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 277, "usage_type": "name"}, {"api_name": "params.FLAGS.batch_size", "line_number": 279, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 279, "usage_type": "name"}, {"api_name": "tensorflow.GradientTape", "line_number": 282, "usage_type": "call"}, {"api_name": "params.FLAGS.train_epochs", "line_number": 309, "usage_type": "attribute"}, {"api_name": "params.FLAGS", "line_number": 309, "usage_type": "name"}, {"api_name": "tensorflow.cast", "line_number": 317, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 317, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 318, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 318, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 324, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 324, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 325, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 325, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 329, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 329, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 330, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 330, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 344, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 344, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 345, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 345, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 350, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 351, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 351, "usage_type": "attribute"}]} +{"seq_id": "445424303", "text": "from setuptools import setup\n\n# read the contents of your README file\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name='PPoop',\n version='0.2.0',\n author='Taylor Perkins',\n author_email='taylorperkins.dev@gmail.com',\n packages=['ppoop', 'ppoop.tests'],\n url='http://pypi.python.org/pypi/PPoop/',\n license='LICENSE.txt',\n description='Procedural Programming for the Object Oriented Programmer.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n install_requires=[\n 'pydantic==0.32.2'\n ]\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 5, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "296235569", "text": "from bs4 import BeautifulSoup as bs\nimport configparser\nimport argparse\nimport MySQLdb\nimport os\nfrom sqlalchemy import create_engine\nimport time\nimport pprint\nimport pandas as pd\nimport numpy as np\nimport re\nimport csv\n\nparser = argparse.ArgumentParser(\n description='Process table parameters')\nparser.add_argument(\n'-d',\ntype=str,\nnargs=1,\nhelp='Download files Directory')\nparser.add_argument(\n'-c',\ntype=str,\nnargs=1,\nhelp='File containing database config in INI format')\nparser.add_argument(\n'-p',\ntype=str,\nnargs=1,\nhelp='Previous database update datestamp')\nparser.add_argument(\n'-n',\ntype=str,\nnargs=1,\nhelp='Next database update datestamp')\nargs = parser.parse_args()\n\nconfig = configparser.ConfigParser()\nconfig.read(args.c[0])\n\nuser=config[\"dev_database\"][\"mysql_db_user\"]\npasswd=config[\"dev_database\"][\"mysql_db_password\"]\nmydb=config[\"dev_database\"][\"mysql_db_host\"]\nport=config[\"dev_database\"][\"mysql_db_port\"]\ndb=config[\"dev_database\"][\"mysql_db_name\"] \n\nconnection_string = 'mysql://' + \\\n str(user) + ':' + str(passwd) + '@' + \\\n str(mydb) + ':' + str(port) + '/' + str(db)\nread_engine = create_engine(connection_string,\n echo=True, encoding='utf-8')\n \nfile_name = args.d[0] \ndata = {}\n\n\nwith open(file_name) as fp:\n\tfor line in fp:\n\t\tsplits=line.split()\n\t\tdiv_count=0;\n\t\tsize_value=int(splits[1]);\n\t\tunit_value=\"bytes\"\n\t\twhile (size_value>1024):\n\t\t\tsize_value=(1.0*size_value)/1024\n\t\t\tdiv_count+=1\n\t\tif div_count==1:\n\t\t\tunit_value=\"KB\"\n\t\telif div_count==2:\n\t\t\tunit_value=\"MB\"\n\t\telif div_count==3:\n\t\t\tunit_value=\"GB\"\n\t\telif div_count==4:\n\t\t\tunit_value=\"TB\"\n\t\telif div_count==5:\n\t\t\tunit_value=\"PB\"\n\t\t\n\t\tdata[splits[0].split(\"\\\\\")[-1].split(\".\")[0]]=str(round(size_value,3))+\" \"+unit_value \n \ninp = open('/code/bulk_downloads_update.txt').read()\ninp = inp.replace(args.p[0],args.n[0])\nsoup = bs(inp)\nrows = soup.findAll('tr')\nfor row in rows:\n td = row.findAll('td')\n try:\n name = td[0].findAll('a')[0].text\n print(name)\n sizespan = td[0].findAll('span')[0]\n sizespan.string =data[name] \n query='select count(*) cnt from '+ name\n count_data = pd.read_sql(query, con=read_engine)\n cc = count_data[\"cnt\"][0]\n cc = str(int(cc))\n td[2].string = \"{:,}\".format(int(cc))\n except:\n pass\n \nHtml_file= open(\"/code/bulk-download-\"+str(args.n[0])+\".html\",\"w\")\nHtml_file.write(str(soup))\nHtml_file.close()\n", "sub_path": "Scripts/Raw_Database_to_Production/bulk-download/metadata-collection/download-metadata.py", "file_name": "download-metadata.py", "file_ext": "py", "file_size_in_byte": 2434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 50, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 91, "usage_type": 
"call"}]} +{"seq_id": "620297689", "text": "import json\n\nfrom ..util import orm\n\n\nasync def insert_game(episode_id: int, event=None, game_number=None, settings=None, submitted=1):\n await orm.execute(\n 'INSERT INTO tournament_games(episode_id, event, game_number, settings, submitted) VALUES (%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE event = %s, game_number = %s, settings = %s, submitted = %s;',\n [episode_id, event, game_number, json.dumps(settings), submitted,\n game_number, event, json.dumps(settings), submitted]\n )\n\n\nasync def get_game_by_episodeid_submitted(episode_id: str):\n results = await orm.select(\n 'SELECT * from tournament_games where episode_id=%s and submitted=1;',\n [episode_id]\n )\n return results[0] if results else None\n\n\nasync def get_game_by_episodeid(episode_id: str):\n results = await orm.select(\n 'SELECT * from tournament_games where episode_id=%s;',\n [episode_id]\n )\n return results[0] if results else None\n\n\nasync def get_all_playoffs():\n results = await orm.select(\n 'SELECT * from tournament_games where submitted=1;'\n )\n return results\n", "sub_path": "alttprbot/database/tournament_games.py", "file_name": "tournament_games.py", "file_ext": "py", "file_size_in_byte": 1115, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "util.orm.execute", "line_number": 7, "usage_type": "call"}, {"api_name": "util.orm", "line_number": 7, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 10, "usage_type": "call"}, {"api_name": "util.orm.select", "line_number": 15, "usage_type": "call"}, {"api_name": "util.orm", "line_number": 15, "usage_type": "name"}, {"api_name": "util.orm.select", "line_number": 23, "usage_type": "call"}, {"api_name": "util.orm", "line_number": 23, "usage_type": "name"}, {"api_name": "util.orm.select", "line_number": 31, "usage_type": "call"}, {"api_name": "util.orm", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "557618225", "text": "import numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport pdb\n\nclass RunningConfusionMatrix():\n \n def __init__(self, labels=[0,1], ignore_label=255):\n \n self.labels = labels\n self.ignore_label = ignore_label\n self.overall_confusion_matrix = None\n self.n_classes=len(labels)\n \n def update_matrix(self, ground_truth, prediction):\n current_confusion_matrix = confusion_matrix(y_true=ground_truth,\n y_pred=prediction,\n labels=self.labels)\n \n if self.overall_confusion_matrix is not None:\n self.overall_confusion_matrix += current_confusion_matrix\n else:\n self.overall_confusion_matrix = current_confusion_matrix\n\n return current_confusion_matrix\n \n def compute_current_mean_intersection_over_union(self):\n intersection = np.diag(self.overall_confusion_matrix)\n ground_truth_set = self.overall_confusion_matrix.sum(axis=1)\n predicted_set = self.overall_confusion_matrix.sum(axis=0)\n union = ground_truth_set + predicted_set - intersection\n\n intersection_over_union = intersection / union.astype(np.float32)\n mean_intersection_over_union = np.mean(intersection_over_union)\n \n return mean_intersection_over_union\n\n def get_results_cm(self):\n overall=self.get_results(self.overall_confusion_matrix)\n return overall\n\n def get_results(self,cm):\n \"\"\"Returns accuracy score evaluation result.\n - overall accuracy\n - mean accuracy\n - mean IU\n - fwavacc\n \"\"\"\n hist = cm\n\n acc = np.diag(hist).sum() / hist.sum()\n 
acc_cls = np.diag(hist) / hist.sum(axis=1)\n # acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iu = dict(zip(range(self.n_classes), iu))\n\n return {\n \"Overall Acc\": acc,\n \"Mean Acc\": acc_cls,\n \"FreqW Acc\": fwavacc,\n \"Mean IoU\": mean_iu,\n \"Class IoU\": cls_iu,\n }\n", "sub_path": "Glas/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 2354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sklearn.metrics.confusion_matrix", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "24230853", "text": "# -*- coding: utf-8 -*-\nimport pytest\nfrom pytest import approx\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nfrom ..nonlinear import (\n CartesianToElevationBearingRange, CartesianToBearingRange,\n CartesianToElevationBearing)\nfrom ...base import ReversibleModel\nfrom ....types.state import State\nfrom ....functions import jacobian as compute_jac\nfrom ....types.angle import Bearing, Elevation\nfrom ....types.array import StateVector, Matrix\n\n\ndef h2d(state_vector, translation_offset, rotation_offset):\n\n xyz = [[state_vector[0, 0] - translation_offset[0, 0]],\n [state_vector[1, 0] - translation_offset[1, 0]],\n [0]]\n\n # Get rotation matrix\n theta_z = - rotation_offset[2, 0]\n cos_z, sin_z = np.cos(theta_z), np.sin(theta_z)\n rot_z = np.array([[cos_z, -sin_z, 0],\n [sin_z, cos_z, 0],\n [0, 0, 1]])\n\n theta_y = - rotation_offset[1, 0]\n cos_y, sin_y = np.cos(theta_y), np.sin(theta_y)\n rot_y = np.array([[cos_y, 0, sin_y],\n [0, 1, 0],\n [-sin_y, 0, cos_y]])\n\n theta_x = - rotation_offset[0, 0]\n cos_x, sin_x = np.cos(theta_x), np.sin(theta_x)\n rot_x = np.array([[1, 0, 0],\n [0, cos_x, -sin_x],\n [0, sin_x, cos_x]])\n\n rotation_matrix = rot_z@rot_y@rot_x\n\n xyz_rot = rotation_matrix @ xyz\n x = xyz_rot[0, 0]\n y = xyz_rot[1, 0]\n # z = 0 # xyz_rot[2, 0]\n\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n\n return np.array([[Bearing(phi)], [rho]])\n\n\ndef h3d(state_vector, translation_offset, rotation_offset):\n\n xyz = [[state_vector[0, 0] - translation_offset[0, 0]],\n [state_vector[1, 0] - translation_offset[1, 0]],\n [state_vector[2, 0] - translation_offset[2, 0]]]\n\n # Get rotation matrix\n theta_z = - rotation_offset[2, 0]\n cos_z, sin_z = np.cos(theta_z), np.sin(theta_z)\n rot_z = np.array([[cos_z, -sin_z, 0],\n [sin_z, cos_z, 0],\n [0, 0, 1]])\n\n theta_y = - rotation_offset[1, 0]\n cos_y, sin_y = np.cos(theta_y), np.sin(theta_y)\n rot_y = np.array([[cos_y, 0, sin_y],\n [0, 1, 0],\n [-sin_y, 0, cos_y]])\n\n theta_x = - rotation_offset[0, 0]\n cos_x, sin_x = np.cos(theta_x), np.sin(theta_x)\n rot_x = np.array([[1, 0, 0],\n [0, cos_x, -sin_x],\n [0, sin_x, cos_x]])\n\n rotation_matrix = rot_z@rot_y@rot_x\n\n xyz_rot = rotation_matrix @ xyz\n x = xyz_rot[0, 0]\n y = 
xyz_rot[1, 0]\n z = xyz_rot[2, 0]\n\n rho = np.sqrt(x**2 + y**2 + z**2)\n phi = np.arctan2(y, x)\n theta = np.arcsin(z/rho)\n\n return np.array([[Elevation(theta)], [Bearing(phi)], [rho]])\n\n\ndef hbearing(state_vector, translation_offset, rotation_offset):\n xyz = [[state_vector[0, 0] - translation_offset[0, 0]],\n [state_vector[1, 0] - translation_offset[1, 0]],\n [state_vector[2, 0] - translation_offset[2, 0]]]\n\n # Get rotation matrix\n theta_z = - rotation_offset[2, 0]\n cos_z, sin_z = np.cos(theta_z), np.sin(theta_z)\n rot_z = np.array([[cos_z, -sin_z, 0],\n [sin_z, cos_z, 0],\n [0, 0, 1]])\n\n theta_y = - rotation_offset[1, 0]\n cos_y, sin_y = np.cos(theta_y), np.sin(theta_y)\n rot_y = np.array([[cos_y, 0, sin_y],\n [0, 1, 0],\n [-sin_y, 0, cos_y]])\n\n theta_x = - rotation_offset[0, 0]\n cos_x, sin_x = np.cos(theta_x), np.sin(theta_x)\n rot_x = np.array([[1, 0, 0],\n [0, cos_x, -sin_x],\n [0, sin_x, cos_x]])\n\n rotation_matrix = rot_z@rot_y@rot_x\n\n xyz_rot = rotation_matrix @ xyz\n x = xyz_rot[0, 0]\n y = xyz_rot[1, 0]\n z = xyz_rot[2, 0]\n\n rho = np.sqrt(x**2 + y**2 + z**2)\n phi = np.arctan2(y, x)\n theta = np.arcsin(z/rho)\n\n return np.array([[Elevation(theta)], [Bearing(phi)]])\n\n\n@pytest.mark.parametrize(\n \"h, ModelClass, state_vec, R , mapping,\\\n translation_offset, rotation_offset\",\n [\n ( # 2D meas, 2D state\n h2d,\n CartesianToBearingRange,\n np.array([[0], [1]]),\n np.array([[0.015, 0],\n [0, 0.1]]),\n np.array([0, 1]),\n np.array([[1], [-1]]),\n np.array([[0], [0], [1]])\n\n ),\n ( # 3D meas, 3D state\n h3d,\n CartesianToElevationBearingRange,\n np.array([[1], [2], [2]]),\n np.array([[0.05, 0, 0],\n [0, 0.015, 0],\n [0, 0, 0.1]]),\n np.array([0, 1, 2]),\n np.array([[0], [0], [0]]),\n np.array([[.2], [3], [-1]])\n ),\n ( # 2D meas, 3D state\n hbearing,\n CartesianToElevationBearing,\n np.array([[1], [2], [3]]),\n np.array([[0.05, 0],\n [0, 0.015]]),\n np.array([0, 1, 2]),\n np.array([[0], [0], [0]]),\n np.array([[-3], [0], [np.pi/3]])\n )\n ],\n ids=[\"standard\", \"RBE\", \"BearingsOnly\"]\n)\ndef test_models(h, ModelClass, state_vec, R,\n mapping, translation_offset, rotation_offset):\n \"\"\" CartesianToBearingRange Measurement Model test \"\"\"\n\n ndim_state = state_vec.size\n state = State(state_vec)\n\n # Create and a measurement model object\n model = ModelClass(ndim_state=ndim_state,\n mapping=mapping,\n noise_covar=R,\n translation_offset=translation_offset,\n rotation_offset=rotation_offset)\n\n # Project a state through the model\n # (without noise)\n meas_pred_wo_noise = model.function(state, noise=0)\n eval_m = h(state_vec, model.translation_offset, model.rotation_offset)\n assert np.array_equal(meas_pred_wo_noise, eval_m)\n\n # Ensure ```lg.transfer_function()``` returns H\n def fun(x):\n return model.function(x, noise=0)\n H = compute_jac(fun, state)\n assert np.array_equal(H, model.jacobian(state))\n\n # Check Jacobian has proper dimensions\n assert H.shape == (model.ndim_meas, ndim_state)\n\n # Ensure inverse function returns original\n if isinstance(model, ReversibleModel):\n J = model.inverse_function(State(meas_pred_wo_noise))\n assert np.allclose(J, state_vec)\n\n # Ensure ```lg.covar()``` returns R\n assert np.array_equal(R, model.covar())\n\n # Ensure model creates noise\n rvs = model.rvs()\n assert rvs.shape == (model.ndim_meas, 1)\n assert isinstance(rvs, StateVector)\n rvs = model.rvs(10)\n assert rvs.shape == (model.ndim_meas, 10)\n assert isinstance(rvs, Matrix)\n # StateVector is subclass of Matrix, so need to check 
explicitly.\n assert not isinstance(rvs, StateVector)\n\n # Project a state throught the model\n # Project a state through the model\n # (without noise)\n meas_pred_wo_noise = model.function(state, noise=0)\n assert np.array_equal(meas_pred_wo_noise, h(\n state_vec, model.translation_offset, model.rotation_offset))\n\n # Evaluate the likelihood of the predicted measurement, given the state\n # (without noise)\n prob = model.pdf(State(meas_pred_wo_noise), state)\n assert approx(prob) == multivariate_normal.pdf(\n meas_pred_wo_noise.T,\n mean=np.array(h(state_vec,\n model.translation_offset,\n model.rotation_offset)).ravel(),\n cov=R)\n\n # Propagate a state vector through the model\n # (with internal noise)\n meas_pred_w_inoise = model.function(state)\n assert not np.array_equal(\n meas_pred_w_inoise, h(state_vec, model.translation_offset,\n model.rotation_offset))\n\n # Evaluate the likelihood of the predicted state, given the prior\n # (with noise)\n prob = model.pdf(State(meas_pred_w_inoise), state)\n assert approx(prob) == multivariate_normal.pdf(\n meas_pred_w_inoise.T,\n mean=np.array(h(state_vec,\n model.translation_offset,\n model.rotation_offset)).ravel(),\n cov=R)\n\n # Propagate a state vector throught the model\n # (with external noise)\n noise = model.rvs()\n meas_pred_w_enoise = model.function(state,\n noise=noise)\n assert np.array_equal(meas_pred_w_enoise, h(\n state_vec, model.translation_offset, model.rotation_offset)+noise)\n\n # Evaluate the likelihood of the predicted state, given the prior\n # (with noise)\n prob = model.pdf(State(meas_pred_w_enoise), state)\n assert approx(prob) == multivariate_normal.pdf(\n meas_pred_w_enoise.T,\n mean=np.array(h(state_vec,\n model.translation_offset,\n model.rotation_offset)).ravel(),\n cov=R)\n", "sub_path": "Chloe-Stone-Soup/Stone Soup Tutorials as PY Scripts/stonesoup/models/measurement/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 8880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.cos", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "types.angle.Bearing", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 75, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "types.angle.Elevation", "line_number": 91, "usage_type": "call"}, {"api_name": "types.angle.Bearing", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "types.angle.Elevation", "line_number": 129, "usage_type": "call"}, {"api_name": "types.angle.Bearing", "line_number": 129, "usage_type": "call"}, {"api_name": "types.state.State", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 189, "usage_type": "call"}, {"api_name": "functions.jacobian", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 195, "usage_type": "call"}, {"api_name": "base.ReversibleModel", "line_number": 201, "usage_type": "argument"}, {"api_name": "types.state.State", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 206, "usage_type": "call"}, {"api_name": "types.array.StateVector", "line_number": 211, "usage_type": "argument"}, {"api_name": "types.array.Matrix", "line_number": 214, "usage_type": "argument"}, {"api_name": "types.array.StateVector", "line_number": 216, "usage_type": "argument"}, {"api_name": "numpy.array_equal", "line_number": 222, "usage_type": "call"}, {"api_name": "types.state.State", "line_number": 227, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 228, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.pdf", "line_number": 228, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 238, "usage_type": "call"}, {"api_name": "types.state.State", "line_number": 244, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 245, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.pdf", "line_number": 245, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 245, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 257, "usage_type": "call"}, {"api_name": "types.state.State", "line_number": 
262, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 263, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal.pdf", "line_number": 263, "usage_type": "call"}, {"api_name": "scipy.stats.multivariate_normal", "line_number": 263, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 132, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 132, "usage_type": "attribute"}, {"api_name": "nonlinear.CartesianToBearingRange", "line_number": 138, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "nonlinear.CartesianToElevationBearingRange", "line_number": 149, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "nonlinear.CartesianToElevationBearing", "line_number": 160, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 166, "usage_type": "attribute"}]} +{"seq_id": "212174082", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0011_auto_20151102_0739'),\n ('tickets', '0015_auto_20151102_0731'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='cars',\n name='res',\n ),\n migrations.AddField(\n model_name='cars',\n name='res',\n field=models.ManyToManyField(verbose_name='РЭС', to='users.Res'),\n ),\n ]\n", "sub_path": "tickets/migrations/0016_auto_20151102_0803.py", "file_name": "0016_auto_20151102_0803.py", "file_ext": "py", "file_size_in_byte": 569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "603642974", "text": "from __future__ import unicode_literals\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom clld.tests.util import 
TestWithDb\nfrom clld.db.models.common import Language\nfrom clld.db.meta import DBSession, VersionedDBSession\n\n\nclass Tests(TestWithDb):\n\n def test_JSONEncodedDict(self):\n l = Language(id='abc', name='Name', jsondata={'i': 2})\n DBSession.add(l)\n DBSession.flush()\n\n DBSession.expunge(l)\n for lang in DBSession.query(Language).filter(Language.id == 'abc'):\n self.assertEqual(lang.jsondata['i'], 2)\n break\n\n def test_CustomModelMixin(self):\n from clld.tests.fixtures import CustomLanguage\n\n DBSession.add(CustomLanguage(id='abc', name='Name', custom='c'))\n DBSession.flush()\n for lang in DBSession.query(Language).filter(Language.id == 'abc'):\n self.assertEqual(lang.custom, 'c')\n self.assertTrue('custom_t' in lang.__solr__(None))\n break\n\n def test_Base(self):\n l = Language(id='abc', name='Name')\n VersionedDBSession.add(l)\n VersionedDBSession.flush()\n VersionedDBSession.expunge(l)\n l = Language.get('abc')\n self.assertEqual(l.name, 'Name')\n assert not list(l.history())\n\n # a bit of a hack to test the human readable representations.\n # we exploit the fact, that on py2, string and unicode comparison does type\n # coercion, while on py3, the two methods should actually return the same string.\n self.assertEqual(l.__str__(), l.__unicode__())\n Language().__str__()\n\n def test_Base_jsondata(self):\n l = Language(id='abc', name='Name')\n VersionedDBSession.add(l)\n VersionedDBSession.flush()\n l.update_jsondata(a=1)\n self.assertTrue('a' in l.jsondata)\n l.update_jsondata(b=1)\n self.assertTrue('b' in l.jsondata and 'a' in l.jsondata)\n self.assertTrue('b' in l.__json__(None)['jsondata'])\n\n def test_Base_get(self):\n self.assertEqual(42, Language.get('doesntexist', default=42))\n self.assertRaises(NoResultFound, Language.get, 'doesntexist')\n", "sub_path": "clld/tests/test_db_meta.py", "file_name": "test_db_meta.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "clld.tests.util.TestWithDb", "line_number": 10, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language", "line_number": 13, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession.add", "line_number": 14, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession", "line_number": 14, "usage_type": "name"}, {"api_name": "clld.db.meta.DBSession.flush", "line_number": 15, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession", "line_number": 15, "usage_type": "name"}, {"api_name": "clld.db.meta.DBSession.expunge", "line_number": 17, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession", "line_number": 17, "usage_type": "name"}, {"api_name": "clld.db.meta.DBSession.query", "line_number": 18, "usage_type": "call"}, {"api_name": "clld.db.models.common.Language", "line_number": 18, "usage_type": "argument"}, {"api_name": "clld.db.meta.DBSession", "line_number": 18, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language.id", "line_number": 18, "usage_type": "attribute"}, {"api_name": "clld.db.meta.DBSession.add", "line_number": 25, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession", "line_number": 25, "usage_type": "name"}, {"api_name": "clld.tests.fixtures.CustomLanguage", "line_number": 25, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession.flush", "line_number": 26, "usage_type": "call"}, {"api_name": "clld.db.meta.DBSession", "line_number": 26, "usage_type": "name"}, {"api_name": "clld.db.meta.DBSession.query", "line_number": 27, 
"usage_type": "call"}, {"api_name": "clld.db.models.common.Language", "line_number": 27, "usage_type": "argument"}, {"api_name": "clld.db.meta.DBSession", "line_number": 27, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language.id", "line_number": 27, "usage_type": "attribute"}, {"api_name": "clld.db.models.common.Language", "line_number": 33, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession.add", "line_number": 34, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession", "line_number": 34, "usage_type": "name"}, {"api_name": "clld.db.meta.VersionedDBSession.flush", "line_number": 35, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession", "line_number": 35, "usage_type": "name"}, {"api_name": "clld.db.meta.VersionedDBSession.expunge", "line_number": 36, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession", "line_number": 36, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language.get", "line_number": 37, "usage_type": "call"}, {"api_name": "clld.db.models.common.Language", "line_number": 37, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language", "line_number": 45, "usage_type": "call"}, {"api_name": "clld.db.models.common.Language", "line_number": 48, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession.add", "line_number": 49, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession", "line_number": 49, "usage_type": "name"}, {"api_name": "clld.db.meta.VersionedDBSession.flush", "line_number": 50, "usage_type": "call"}, {"api_name": "clld.db.meta.VersionedDBSession", "line_number": 50, "usage_type": "name"}, {"api_name": "clld.db.models.common.Language.get", "line_number": 58, "usage_type": "call"}, {"api_name": "clld.db.models.common.Language", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 59, "usage_type": "argument"}, {"api_name": "clld.db.models.common.Language.get", "line_number": 59, "usage_type": "attribute"}, {"api_name": "clld.db.models.common.Language", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "167072266", "text": "\n\"\"\"\nCopyright 2017-2018 Agnese Salutari.\nLicensed under the Apache License, Version 2.0 (the \"License\"); \nyou may not use this file except in compliance with the License. \nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on \nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
\nSee the License for the specific language governing permissions and limitations under the License\n\"\"\"\n\nimport lindaproxy as lp\nimport redis\n\n\ndef makeAtomic(s):\n out = s.replace('(', 'A')\n out = out.replace(')', 'B')\n out = out.replace('[', 'C')\n out = out.replace(']', 'D')\n out = out.replace('.', 'E')\n out = out.replace(',', 'F')\n out = out.replace('/', 'G')\n out = out.replace('\\\\', 'H')\n out = out.replace(\"'\", 'I')\n out = out.replace(' ', 'O')\n out = out.replace(':', 'J')\n return out\n\n\n# used to send message to the DALI MAS\nL = lp.LindaProxy(host='127.0.0.1')\nL.connect()\n\n# prepare and forward the messages to the MAS\nR = redis.Redis()\npubsub = R.pubsub()\npubsub.subscribe('LINDAchannel')\nprint('listening on LINDAchannel...')\nfor item in pubsub.listen():\n if item['type']=='message':\n msg = item['data'].decode('utf-8')\n separator = msg.index(':')\n # get addressee\n addressee = msg[:separator]\n # remove addressee from the message body\n msg = msg[separator+1:]\n atomic = makeAtomic(msg)\n print('--- redis event ---')\n print('addressee: {}'.format(addressee))\n print('message: {}'.format(msg))\n print('atomic: {}'.format(atomic))\n L.send_message(addressee, \"redis(\" + atomic + \")\")\n", "sub_path": "code/LindaProxy/Redis2LINDA.py", "file_name": "Redis2LINDA.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "lindaproxy.LindaProxy", "line_number": 35, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "440970357", "text": "import sys\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QApplication, QLabel, QListWidget, QScrollBar, \\\n QWidget, QListWidgetItem, QPushButton, QMainWindow, QFrame\nfrom PyQt5.QtGui import QIcon, QPixmap, QImage, QPalette, QBrush, QMovie, \\\n QPainter, QTransform, QPen, QColor\nfrom PyQt5.QtCore import QSize, QObject, pyqtSignal, Qt, QTimer\n\nfrom backend import NumeroImagen, Fondo, InicioChecker, ruta_colores,\\\n ruta_imagenes, completar_numero, extraer_numero, ruta_sprites, \\\n diccionario_colores, ruta_fondos, ruta_gifs, Poder\nimport time\nfrom excepciones import VolverMenu\nimport itertools\n\nclass Etiqueta(QLabel):\n def __init__(self, parent=None):\n super(Etiqueta, self).__init__(parent=parent)\n self.rastro = [[]]\n self.pen = QPen(QColor(\"#FFFFFF\"), 3, Qt.SolidLine)\n self.cuadro = QPixmap()\n\n\n def paintEvent(self, e):\n paint = QPainter(self)\n paint.setPen(self.pen)\n\n for n in range(0, len(self.rastro), 2):\n x_prev, y_prev = self.rastro[n][0] if self.rastro[n] else (0, 0)\n for x, y in self.rastro[n]:\n paint.drawLine(x_prev + 14, y_prev + 14, x + 14, y + 14)\n x_prev, y_prev = x, y\n self.update()\n\nclass Juego:\n def __init__(self, partida, datos_jefe):\n self.partida = partida.datos()\n self.datos_jefe = datos_jefe\n\ngame_name, game_class = uic.loadUiType(\"game.ui\")\n\nclass Game(game_name, game_class):\n move_character_signal = pyqtSignal(str)\n move_guest_signal = pyqtSignal(str)\n\n def __init__(self, cliente=\"\", sala_espera=\"\"):\n super().__init__()\n\n self.cliente = cliente\n self.sala = sala_espera\n\n self.jugador_nombre = self.sala.usuario\n self.invitado_nombre = self.sala.invitado\n\n self.setupUi(self)\n self.game_zone.setPixmap(QPixmap(f\"{ruta_fondos}/mapa_black.png\"))\n self.mostrar_botones_elegir()\n self.boton_marcado = None\n self.keyPressEvent = self.elegir_teclas\n 
self.text_esperando.hide()\n self.fondo_elegir_teclas.setPixmap(QPixmap(f\"{ruta_colores}/claro.png\"))\n self.button_listo_teclas.clicked.connect(self.teclas_listas)\n imagen = QImage(f\"{ruta_fondos}/fondo_juego.png\")\n fondo = imagen.scaled(QSize(640, 360)) # resize\n palette = QPalette()\n palette.setBrush(10, QBrush(fondo)) # 10 = Windowrole\n self.setPalette(palette)\n self.cargando = QMovie(f\"{ruta_gifs}/Nyancat.gif\")\n self.cargando.setScaledSize(QSize(200, 200))\n self.paintEvent = self.mostrar_gif\n self.cargando.frameChanged.connect(self.repaint)\n self.nombres_puntajes = [(self.text_jugador_1, self.text_puntaje_1),\n (self.text_jugador_2, self.text_puntaje_2),\n (self.text_jugador_3, self.text_puntaje_3),\n (self.text_jugador_4, self.text_puntaje_4)]\n \n\n\n self.grupo_juego.hide()\n print(\"MOSTRADO GAME\")\n self.juego = None\n self.jugadores_label = dict()\n print(\"jugadores labels listos\")\n self.lista_labels = [self.label_1, self.label_2, self.label_3,\n self.label_4]\n print(\"labels listos\")\n # self.lista_etiquetas = [Etiqueta(self), Etiqueta(self), Etiqueta(\n # self), Etiqueta(self)]\n print(\"jeje\")\n self.jugadores_etiquetas = dict()\n # for etiqueta in self.lista_etiquetas:\n # etiqueta.resize(630, 350)\n # etiqueta.hide()\n print(\"etiquetas listas\")\n self._mapa = dict()\n for x in range(150, 640, 3):\n for y in range(21, 360, 3):\n label = QLabel(self)\n label.move(x, y)\n label.resize(3 * 2, 3 * 2)\n self._mapa[(x + 1, y + 1)] = label\n self._mapa[(x + 1, y + 1)].hide()\n print(\"agregado:\", x+1, y+1)\n self.button_pausa.clicked.connect(self.pausar)\n\n self.button_salir.clicked.connect(self.volver_menu)\n\n self.label_poderes = {self.label_poder_1: False, self.label_poder_2:\n False, self.label_poder_3: False, self.label_poder_4: False}\n\n def pausar(self):\n self.move_character_signal.emit('E')\n\n def volver_menu(self):\n self.hide()\n self.cliente.restart()\n\n def mapa(self, x, y):\n grupo_pixeles = 3\n tamaño_pixeles = 6\n x_new = ((x // grupo_pixeles) * grupo_pixeles) + grupo_pixeles // 2\n y_new = ((y // grupo_pixeles) * grupo_pixeles) + grupo_pixeles // 2\n return self._mapa[(int(x_new), int(y_new))]\n\n def pintar(self, x, y, color):\n a_pintar = self.mapa(x, y)\n a_pintar.setStyleSheet(f\"background-color:{color}\")\n if a_pintar.isHidden():\n a_pintar.show()\n\n\n def mover(self, event):\n self.cliente.send({\"estado\": \"mover\", \"contenido\": event})\n\n def mover_invitado(self, event):\n self.cliente.send({\"estado\": \"mover_invitado\", \"contenido\": event})\n\n def _keyPressEvent(self, e):\n \"\"\"\n Dada la presión de una tecla se llama a esta función. 
Al apretarse\n una tecla chequeamos si\n esta dentro de las teclas del control del juego y de ser así, se\n envía una señal al backend\n con la acción además de actualizar el sprite.\n :param e: QKeyEvent\n :return:\n \"\"\"\n\n #self.frame += 1\n if e.key() == self.botones_elegidos[\"JR\"]:\n self.move_character_signal.emit(\"R\")\n if e.key() == self.botones_elegidos[\"JL\"]:\n self.move_character_signal.emit(\"L\")\n if e.key() == self.botones_elegidos[\"IR\"]:\n self.move_guest_signal.emit(\"R\")\n if e.key() == self.botones_elegidos[\"IL\"]:\n self.move_guest_signal.emit(\"L\")\n if e.key() == Qt.Key_Space:\n self.move_character_signal.emit('E')\n\n def _keyReleaseEvent(self, e):\n if e.key() == self.botones_elegidos[\"JR\"] and not e.isAutoRepeat():\n self.move_character_signal.emit('U')\n if e.key() == self.botones_elegidos[\"JL\"] and not e.isAutoRepeat():\n self.move_character_signal.emit('U')\n if e.key() == self.botones_elegidos[\"IR\"] and not e.isAutoRepeat():\n self.move_character_signal.emit('U')\n if e.key() == self.botones_elegidos[\"IL\"] and not e.isAutoRepeat():\n self.move_character_signal.emit('U')\n\n def mostrar(self):\n self.show()\n\n\n\n def comenzar_juego(self):\n self.grupo_elegir_teclas.hide()\n self.fondo_elegir_teclas.hide()\n self.text_esperando.hide()\n self.grupo_juego.show()\n\n def mostrar_jugadores(self, juego):\n self.juego = juego\n self.text_puntaje_max.setText(f\"Puntaje Máximo: \"\n f\"{self.juego.puntaje_max}\")\n for jugadores, casillas in zip(self.juego.players.items(),\n self.nombres_puntajes):\n nombre, player = jugadores\n cuadro, cuadro_puntaje = casillas\n if nombre == self.jugador_nombre:\n self.jugador = player\n elif nombre == self.invitado_nombre:\n self.invitado = player\n label = self.lista_labels.pop(0)\n label.setPixmap(QPixmap(f\"{ruta_sprites}/color_\"\n f\"{player.color}.png\").scaledToWidth(\n 6).scaledToHeight(6).transformed(\n QTransform().rotate(player.angle)))\n label.move(player.x, player.y)\n # self.jugadores_etiquetas[nombre] = self.lista_etiquetas.pop(0)\n # self.jugadores_etiquetas[nombre].pen = QPen(QColor(\n # diccionario_colores[f\"color_{player.color}\"]), 3, Qt.SolidLine)\n # self.jugadores_etiquetas[nombre].show()\n self.jugadores_label[nombre] = label\n self.keyPressEvent = self._keyPressEvent\n self.keyReleaseEvent = self._keyReleaseEvent\n\n self.move_character_signal.connect(self.mover)\n self.move_guest_signal.connect(self.mover_invitado)\n cuadro.setText(nombre)\n color = diccionario_colores[f\"color_{player.color}\"]\n cuadro.setStyleSheet(f\"background-color: {color}\")\n cuadro_puntaje.setText(str(player.puntaje))\n\n\n def actualizar_mapa(self, juego):\n self.juego = juego\n for nombre, player in self.juego.players.items():\n label = self.jugadores_label[nombre]\n if player.choco:\n label.hide()\n else:\n label.setPixmap(QPixmap(f\"{ruta_sprites}/color_\"\n f\"{player.color}.png\").scaledToWidth(\n 6).scaledToHeight(6).transformed(\n QTransform().rotate(player.angle)))\n label.move(player.x, player.y)\n if len(player.rastro) % 2:\n self.pintar(player.x, player.y, diccionario_colores[\n f\"color_{player.color}\"])\n\n for poder, label_poder in itertools.zip_longest(\n self.juego.lista_poderes, self.label_poderes):\n if poder is not None:\n label_poder.move(poder.x, poder.y)\n label_poder.setPixmap(QPixmap(f\"{ruta_sprites}/poder_\"\n f\"{poder.tipo}.png\").scaledToWidth(\n 12).scaledToHeight(12))\n if label_poder.isHidden():\n label_poder.show()\n else:\n if not label_poder.isHidden():\n 
label_poder.hide()\n\n\n #self.jugadores_etiquetas[nombre].rastro = player.rastro\n\n\n\n\n def _paintEvent(self, event):\n currentFrame = self.cargando.currentPixmap()\n frameRect = currentFrame.rect()\n frameRect.moveCenter(self.rect().center())\n if frameRect.intersects(event.rect()):\n painter = QPainter(self)\n painter.drawPixmap(frameRect.left(), frameRect.top(), currentFrame)\n\n def mostrar_gif(self, event):\n '''https://stackoverflow.com/questions/\n 41709464/python-pyqt-add-background-gif'''\n\n currentFrame = self.cargando.currentPixmap()\n frameRect = currentFrame.rect()\n frameRect.moveCenter(self.rect().center())\n if frameRect.intersects(event.rect()):\n painter = QPainter(self)\n painter.drawPixmap(220, 90, currentFrame)\n\n\n def teclas_listas(self):\n self.grupo_elegir_teclas.hide()\n self.text_esperando.show()\n if self.sala.n_invitado:\n self.cliente.send({\"estado\": \"teclas_listas\", \"contenido\": (\n self.sala.usuario, self.sala.invitado)})\n else:\n self.cliente.send({\"estado\": \"teclas_listas\", \"contenido\": (\n self.sala.usuario,)})\n\n self.cargando.start()\n\n\n def eventFilter(self, source, event):\n return super(Game, self).eventFilter(source, event)\n\n def mostrar_botones_elegir(self):\n botones_elegir = (self.button_izquierda_jugador,\n self.button_derecha_jugador,\n self.button_izquierda_invitado,\n self.button_derecha_invitado)\n botones_elegir_nombres = (\"JL\", \"JR\", \"IL\", \"IR\")\n self.botones_elegir = dict({boton: nombre for boton, nombre in zip(\n botones_elegir, botones_elegir_nombres)})\n self.botones_elegidos = {\"JL\": 16777234, \"JR\": 16777236,\n \"IL\": 65, \"IR\": 68}\n for boton in botones_elegir:\n '''https://stackoverflow.com/questions/\n 24925631/disable-pyqt-arrow-key-focus'''\n boton.setFocusPolicy(Qt.NoFocus)\n boton.clicked.connect(self.marcar_boton_elegir)\n self.button_listo_teclas.setFocusPolicy(Qt.NoFocus)\n\n\n def marcar_boton_elegir(self):\n boton = self.sender()\n self.boton_marcado = boton\n\n def elegir_teclas(self, e):\n '''http://www.naturalprogramming.com/pythonqt/KeyboardInputDemoQt.py'''\n if self.boton_marcado is not None and e.key() not in \\\n self.botones_elegidos.values():\n numero_tecla = e.key()\n if numero_tecla < 256:\n string_tecla = \"%c\" % numero_tecla\n elif e.key() == Qt.Key_Up:\n string_tecla = \"Up\"\n elif e.key() == Qt.Key_Down:\n string_tecla = \"Down\"\n elif e.key() == Qt.Key_Left:\n string_tecla = \"Left\"\n elif e.key() == Qt.Key_Right:\n string_tecla = \"Right\"\n else:\n return\n self.botones_elegidos[\n self.botones_elegir[self.boton_marcado]] = e.key()\n self.boton_marcado.setText(string_tecla)\n\n\n\nclass MainWindow2(QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow2, self).__init__(parent)\n self.setGeometry(50, 50, 600, 750)\n self.setFixedSize(600, 750)\n '''\n self.movie = QMovie(\"imagenes/gifs/rafita.gif\")\n self.paintEvent = self._paintEvent\n print(self.repaint, QMainWindow.repaint)\n self.movie.frameChanged.connect(self.repaint)\n \n '''\n self.startUIWindow()\n\n def startUIWindow(self):\n self.Window = UIWindow(self)\n self.setWindowTitle(\"My Program\")\n self.show()\n\n def _paintEvent(self, event):\n currentFrame = self.movie.currentPixmap()\n frameRect = currentFrame.rect()\n frameRect.moveCenter(self.rect().center())\n if frameRect.intersects(event.rect()):\n painter = QPainter(self)\n painter.drawPixmap(frameRect.left(), frameRect.top(), currentFrame)\n\nclass UIWindow(QWidget):\n\n def __init__(self, parent=None):\n super(UIWindow, 
self).__init__(parent)\n self.resize(QSize(600, 750))\n self.ToolsBTN = QPushButton('tab', self)\n self.ToolsBTN.resize(100, 40)\n self.ToolsBTN.move(60, 300)\n\n self.CPS = QPushButton('tab1', self)\n self.CPS.resize(100, 40)\n self.CPS.move(130, 600)\n\n self.Creator = QPushButton('tab2', self)\n self.Creator.resize(100, 40)\n self.Creator.move(260, 50)\n\n\nif __name__ == \"__main__\":\n app = QApplication([])\n juego = Game()\n juego.show()\n sys.exit(app.exec_())\n\n print(\"saliendo :D gracias c:\")\n\n", "sub_path": "Tareas/T04/cliente/juego.py", "file_name": "juego.py", "file_ext": "py", "file_size_in_byte": 14571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPen", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.SolidLine", "line_number": 20, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.uic.loadUiType", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 56, "usage_type": "call"}, {"api_name": "backend.ruta_fondos", "line_number": 56, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 61, "usage_type": "call"}, {"api_name": "backend.ruta_colores", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 63, "usage_type": "call"}, {"api_name": "backend.ruta_fondos", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPalette", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QMovie", "line_number": 68, "usage_type": "call"}, {"api_name": "backend.ruta_gifs", "line_number": 68, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Key_Space", "line_number": 158, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 158, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 195, "usage_type": "call"}, {"api_name": "backend.ruta_sprites", "line_number": 195, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QTransform", "line_number": 198, "usage_type": "call"}, {"api_name": "backend.diccionario_colores", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 223, "usage_type": "call"}, {"api_name": "backend.ruta_sprites", "line_number": 223, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QTransform", "line_number": 226, "usage_type": "call"}, {"api_name": "backend.diccionario_colores", "line_number": 229, "usage_type": "name"}, {"api_name": "itertools.zip_longest", "line_number": 232, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", 
"line_number": 236, "usage_type": "call"}, {"api_name": "backend.ruta_sprites", "line_number": 236, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 256, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 267, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 300, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 300, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 302, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 302, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_Up", "line_number": 316, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 316, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_Down", "line_number": 318, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 318, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_Left", "line_number": 320, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 320, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_Right", "line_number": 322, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 322, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 332, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 356, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 359, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSize", "line_number": 363, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 364, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 368, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 372, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 378, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 381, "usage_type": "call"}]} +{"seq_id": "370610667", "text": "\nimport sys\nimport os\nimport itertools\nimport random\n\nimport pytest\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\nimport votelib.evaluate.auxiliary\nfrom votelib.evaluate.core import Tie\n\nRAND_VOTES = [\n {chr(65 + k): random.randint(0, 1000) for k in range(20)} for i in range(10)\n]\n\nSTABLE_SORTITORS = [\n votelib.evaluate.auxiliary.RandomUnrankedBallotSelector(seed=1711),\n votelib.evaluate.auxiliary.Sortitor(seed=1711),\n]\n\nUNSTABLE_SORTITORS = [\n votelib.evaluate.auxiliary.RandomUnrankedBallotSelector(),\n votelib.evaluate.auxiliary.Sortitor(),\n]\n\nSTABLE_SORTITOR_PARAMS = (('sortitor', 'votes', 'n_seats'), list(itertools.product(\n STABLE_SORTITORS, RAND_VOTES, range(1, 4)\n)))\n\nUNSTABLE_SORTITOR_PARAMS = (('sortitor', 'votes', 'n_seats'), list(itertools.product(\n UNSTABLE_SORTITORS, RAND_VOTES, range(1, 4)\n)))\n\n@pytest.mark.parametrize(*STABLE_SORTITOR_PARAMS)\ndef test_sortitor_stable(sortitor, votes, n_seats):\n elected_vars = _generate_variants(sortitor, votes, n_seats)\n assert len(elected_vars) == 1\n var = elected_vars.pop()\n assert len(var) == n_seats\n assert not any(isinstance(elected, Tie) for elected in var)\n\n\n@pytest.mark.parametrize(*UNSTABLE_SORTITOR_PARAMS)\ndef test_sortitor_unstable(sortitor, votes, n_seats):\n elected_vars = _generate_variants(sortitor, votes, n_seats)\n assert len(elected_vars) > 1\n for var in elected_vars:\n assert len(var) == n_seats\n assert not any(isinstance(elected, Tie) for elected in 
var)\n\n\ndef _generate_variants(evaluator, votes, n_seats):\n elected_vars = set()\n for i in range(100):\n random.seed(None)\n elected_vars.add(tuple(evaluator.evaluate(votes, n_seats)))\n return elected_vars\n\n\ndef test_inporder():\n votes = {\n 'A': 500,\n 'B': 300,\n 'C': 160,\n }\n eval = votelib.evaluate.auxiliary.InputOrderSelector()\n result = eval.evaluate(votes, 2)\n assert result == ['A', 'B']\n", "sub_path": "tests/evaluate/test_auxiliary.py", "file_name": "test_auxiliary.py", "file_ext": "py", "file_size_in_byte": 1971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 14, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate.auxiliary.RandomUnrankedBallotSelector", "line_number": 18, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate", "line_number": 18, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.auxiliary", "line_number": 18, "usage_type": "name"}, {"api_name": "votelib.evaluate.auxiliary.evaluate.auxiliary.Sortitor", "line_number": 19, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate", "line_number": 19, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.auxiliary", "line_number": 19, "usage_type": "name"}, {"api_name": "votelib.evaluate.auxiliary.evaluate.auxiliary.RandomUnrankedBallotSelector", "line_number": 23, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate", "line_number": 23, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.auxiliary", "line_number": 23, "usage_type": "name"}, {"api_name": "votelib.evaluate.auxiliary.evaluate.auxiliary.Sortitor", "line_number": 24, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate", "line_number": 24, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.auxiliary", "line_number": 24, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 27, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 31, "usage_type": "call"}, {"api_name": "votelib.evaluate.core.Tie", "line_number": 41, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.core.Tie", "line_number": 50, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 44, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 56, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate.auxiliary.InputOrderSelector", "line_number": 67, "usage_type": "call"}, {"api_name": "votelib.evaluate.auxiliary.evaluate", "line_number": 67, "usage_type": "attribute"}, {"api_name": "votelib.evaluate.auxiliary", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "485491431", "text": "import pandas as pd\r\nfrom sklearn.model_selection import StratifiedKFold \r\nfrom sklearn.naive_bayes import GaussianNB \r\nfrom sklearn import metrics\r\n\r\n\r\ndef 
naiveBayes(df,columns): \r\n models = []\r\n pred = []\r\n score = []\r\n conf = []\r\n \r\n X = df[columns]\r\n Y = df[\"Label\"]\r\n sfolder = StratifiedKFold(n_splits = 5,shuffle = False)\r\n for train_index, test_index, in sfolder.split(X, Y): \r\n print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\r\n Y_train, Y_test = Y.iloc[train_index], Y.iloc[test_index]\r\n \r\n clf = GaussianNB()\r\n clf.fit(X_train,Y_train.values)\r\n \r\n models.append(clf)\r\n pred.append(clf.predict(X_test))\r\n i = len(pred)\r\n conf.append(metrics.confusion_matrix(Y_test,pred[i-1]))\r\n score.append(metrics.accuracy_score(Y_test,pred[i-1]))\r\n return models, pred, conf, score\r\n\r\n\r\n \r\nif __name__ == '__main__':\r\n file = '.\\\\final_news_sen.csv'\r\n\r\n traningdata = pd.read_csv(file)\r\n traningdata.dropna(inplace=True)\r\n \r\n XClass_DT = ['ZipCode','Unemp Rate']\r\n \r\n\r\n model, pred, conf, score = naiveBayes(traningdata, XClass_DT)\r\n \r\n print(\"Confusion Matrix of Naive Bayes: \")\r\n print(conf)\r\n print()\r\n \r\n print(\"Accquarcy of Naive Bayes: \")\r\n print(score)\r\n", "sub_path": "model/naiveBayes.py", "file_name": "naiveBayes.py", "file_ext": "py", "file_size_in_byte": 1381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 28, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "603557773", "text": "from flask.ext.testing import TestCase\nimport ara.webapp as w\nimport ara.models as m\nimport datetime\n\n\nclass TestFilters(TestCase):\n '''Tests for our Jinja2 filters'''\n\n SQLALCHEMY_DATABASE_URI = 'sqlite://'\n TESTING = True\n\n def create_app(self):\n return w.create_app(self)\n\n def setUp(self):\n m.db.create_all()\n self.env = self.app.jinja_env\n\n def tearDown(self):\n m.db.session.remove()\n m.db.drop_all()\n\n def test_pathtruncate_short(self):\n path = '/short/path.yml'\n self.assertTrue(len(path) < self.app.config['ARA_PATH_MAX'])\n\n t = self.env.from_string('{{ path | pathtruncate }}')\n res = t.render(path=path)\n self.assertEqual(res, path)\n self.assertFalse(res.startswith('...'))\n self.assertEqual(res.count('path.yml'), 1)\n\n def test_pathtruncate_long(self):\n path = '/this/is_definitely/a/very/long/path.yml'\n self.assertTrue(len(path) > self.app.config['ARA_PATH_MAX'])\n t = self.env.from_string('{{ path | pathtruncate }}')\n res = t.render(path=path)\n\n self.assertNotEqual(res, path)\n self.assertTrue(res.startswith('...'))\n self.assertEqual(res.count('path.yml'), 1)\n\n def test_datefmt(self):\n datestr = '2016-05-25 14:34:00'\n date = datetime.datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S')\n t = self.env.from_string('{{ date | datefmt }}')\n res = t.render(date=date)\n\n self.assertEqual(res, datestr)\n\n def test_timefmt(self):\n time = datetime.timedelta(seconds=90061)\n t = self.env.from_string('{{ time | timefmt }}')\n res = t.render(time=time)\n\n self.assertEqual(res, '1 day, 
1:01:01')\n\n def test_from_json(self):\n data = '{\"key\": \"value\"}'\n t = self.env.from_string('{{ data | from_json }}')\n res = t.render(data=data)\n\n self.assertEqual(res, u\"{u'key': u'value'}\")\n\n def test_to_json(self):\n data = {'key': 'value'}\n t = self.env.from_string('{{ data | to_nice_json }}')\n res = t.render(data=data)\n\n self.assertEqual(res,\n u'{\\n \"key\": \"value\"\\n}')\n", "sub_path": "tests/unit/test_filters.py", "file_name": "test_filters.py", "file_ext": "py", "file_size_in_byte": 2177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.ext.testing.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "ara.webapp.create_app", "line_number": 14, "usage_type": "call"}, {"api_name": "ara.webapp", "line_number": 14, "usage_type": "name"}, {"api_name": "ara.models.db.create_all", "line_number": 17, "usage_type": "call"}, {"api_name": "ara.models.db", "line_number": 17, "usage_type": "attribute"}, {"api_name": "ara.models", "line_number": 17, "usage_type": "name"}, {"api_name": "ara.models.db.session.remove", "line_number": 21, "usage_type": "call"}, {"api_name": "ara.models.db", "line_number": 21, "usage_type": "attribute"}, {"api_name": "ara.models", "line_number": 21, "usage_type": "name"}, {"api_name": "ara.models.db.drop_all", "line_number": 22, "usage_type": "call"}, {"api_name": "ara.models.db", "line_number": 22, "usage_type": "attribute"}, {"api_name": "ara.models", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "436453745", "text": "from ray import serve\n\nfrom io import BytesIO\nfrom PIL import Image\nimport requests\n\nimport torch\nfrom torchvision import transforms\nfrom torchvision.models import resnet18\n\nBACKEND = \"resnet18:v0\"\n\nclass ImageModel:\n def __init__(self):\n self.model = resnet18(pretrained=True).eval()\n self.preprocessor = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Lambda(lambda t: t[:3, ...]), # remove alpha channel\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n def __call__(self, flask_request):\n image_payload_bytes = flask_request.data\n pil_image = Image.open(BytesIO(image_payload_bytes))\n print(\"[1/3] Parsed image data: {}\".format(pil_image))\n\n pil_images = [pil_image] # Our current batch size is one\n input_tensor = torch.cat(\n [self.preprocessor(i).unsqueeze(0) for i in pil_images])\n print(\"[2/3] Images transformed, tensor shape {}\".format(\n input_tensor.shape))\n\n with torch.no_grad():\n output_tensor = self.model(input_tensor)\n print(\"[3/3] Inference done!\")\n return {\"class_index\": int(torch.argmax(output_tensor[0]))}\n\n@ray.remote(resources={'num_cpus': 4})\ndef func():\n import time\n time.sleep(10)\n print(\"resultz: Scaling!\")\n\nif __name__ == \"__main__\":\n print(\"Running!\")\n # Serve needs 1 CPU in head, and 1 on each ray node\n\n config = {\"num_replicas\": 3} # replica == 1 CPU, 0 GPU\n\n # start up with smaller number of replicas\n client = serve.start()\n client.create_backend(BACKEND, ImageModel, config=config)\n client.create_endpoint(\n \"predictor\",\n backend=BACKEND,\n route=\"/image_predict\",\n methods=[\"POST\"])\n\n # update 
size of cluster\n larger_config = {\"num_replicas\": 10}\n client.update_backend_config(BACKEND, larger_config)\n\n ray_logo_bytes = requests.get(\n \"https://github.com/ray-project/ray/raw/\"\n \"master/doc/source/images/ray_header_logo.png\").content\n\n resp = requests.post(\n \"http://localhost:8000/image_predict\", data=ray_logo_bytes)\n print(resp.json())\n\n # trigger scaling\n r = ray.get([func.remote() for i in range(1000)])\n\n", "sub_path": "serving/scale.py", "file_name": "scale.py", "file_ext": "py", "file_size_in_byte": 2350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torchvision.models.resnet18", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Lambda", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "ray.remote", "line_number": 41, "usage_type": "call"}, {"api_name": "ray.serve.start", "line_number": 54, "usage_type": "call"}, {"api_name": "ray.serve", "line_number": 54, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 66, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 70, "usage_type": "call"}, {"api_name": "ray.get", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "507854394", "text": "'''\nMostly code from\nhttps://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py\n'''\n\nfrom __future__ import print_function\nimport os\nimport numpy as np\n\nimport crop_encoder_data\nfrom keras.callbacks import TensorBoard\nfrom keras.datasets import mnist\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization\nfrom keras.layers import Convolution2D, MaxPooling2D, Input, Conv2D, Dropout\nfrom keras import regularizers\nfrom keras.utils import np_utils\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom PIL import Image\nparent_dir = os.path.dirname(os.getcwd())\n\nbatch_size = 50\nnb_classes = 8\nnb_epoch = 20\n\n# input image dimensions\nimg_rows, img_cols = 128, 128\ncrop_rows,crop_cols = 32,32\n# number of convolutional 
filters to use\nnb_filters = 16\n# size of pooling area for max pooling\npool_size = (2, 2)\n# convolution kernel size\nkernel_size = (3, 3)\n\n# the data, shuffled and split between train and test sets\n\nX_train, X_crop_train , X_test , X_crop_test = crop_encoder_data.load_data(0.5)\n\n# (X_train, y_train), (X_test, y_test) = mnist.load_data()\nif K.image_dim_ordering() == 'th':\n X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n X_crop_train = X_crop_train.reshape(X_train.shape[0], 1, crop_rows, crop_cols)\n X_crop_test = X_crop_test.reshape(X_test.shape[0], 1, crop_rows, crop_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n print(\"DOEST HIS RUN\")\n X_train = X_train.reshape(X_train.shape[0], img_rows,img_cols,1)\n X_test = X_test.reshape(X_test.shape[0], img_rows,img_cols,1)\n X_crop_train = X_crop_train.reshape(X_train.shape[0], crop_rows*crop_cols)\n X_crop_test = X_crop_test.reshape(X_test.shape[0], crop_rows*crop_cols)\n input_shape = (img_rows,img_cols,1)\n\nX_train = X_train.astype('float32')\nX_train /= 255\nX_test = X_test.astype('float32')\nX_test /= 255\n\nX_crop_train = X_crop_train.astype('float32')\nX_crop_train /= 255\nX_crop_test = X_crop_test.astype('float32')\nX_crop_test /= 255\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint('X_train shape:', X_crop_train.shape)\nprint(X_crop_train.shape[0], 'train samples')\n\nnb_filters = 8\nkernel_size = (3,3)\npool_size = (2,2)\n\ninput_img = Input(shape=(img_rows,img_cols,1))\nencoded = Conv2D(nb_filters*2,kernel_size[0],kernel_size[1],\n border_mode=\"same\",activation='relu',\n W_regularizer=regularizers.l1(0.1))(input_img)\nencoded = BatchNormalization(mode=2)(encoded)\nencoded = Conv2D(nb_filters,kernel_size[0],kernel_size[1],\n border_mode=\"same\",activation='relu',\n W_regularizer=regularizers.l1(0.1))(encoded)\nencoded = BatchNormalization(mode=2)(encoded)\nencoded = Dropout(0.2)(encoded)\nencoded = MaxPooling2D(pool_size=(2,2))(encoded)\nencoded = Conv2D(nb_filters,kernel_size[0],kernel_size[1],\n border_mode=\"same\",activation='relu',\n W_regularizer=regularizers.l1(0.1))(encoded)\nencoded = BatchNormalization(mode=2)(encoded)\nencoded = MaxPooling2D(pool_size=(2,2))(encoded)\nencoded = Flatten()(encoded)\n#Dont put regularizers on last layer\nencoded = Dense(1024, activation='sigmoid')(encoded)\n\nautoencoder = Model(input_img, encoded)\nautoencoder.compile(optimizer='adam', loss='mean_absolute_error')\n# at this point the representation is (32*32, 1)\n\nif(os.path.isfile(\"cnn_autoencoder.h5\") == True):\n del autoencoder\n autoencoder = load_model('cnn_autoencoder.h5')\n\n\nif __name__ == \"__main__\":\n autoencoder.fit(X_train, X_crop_train, batch_size=batch_size, nb_epoch=nb_epoch,\n verbose=1,\n callbacks=[TensorBoard(log_dir='/tmp/autoencoder')],\n #validation_data=(X_test, X_crop_test)\n )\n\n autoencoder.save(\"cnn_autoencoder.h5\",overwrite=True)\n print(\"cnn_autoencoder \"+\"Saved model to disk\")\n\ndef show_result(pred_result):\n result = pred_result.copy()\n result *= 255\n result = np.reshape(result,(result.shape[1],result.shape[1]))\n result = np.array(result,dtype='uint8')\n Image.fromarray(result).show()\n", "sub_path": "python-scripts/autoencoder_2.py", "file_name": "autoencoder_2.py", "file_ext": "py", "file_size_in_byte": 4173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": 
"os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 21, "usage_type": "call"}, {"api_name": "crop_encoder_data.load_data", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.backend.image_dim_ordering", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 42, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.regularizers.l1", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 78, "usage_type": "name"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.regularizers.l1", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 82, "usage_type": "name"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.regularizers.l1", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 88, "usage_type": "name"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "keras.models.load_model", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 119, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "490273517", "text": "#import stuff\r\nimport pandas\r\nimport numpy\r\nfrom matplotlib import pyplot as plt\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nfrom tkinter import *\r\n\r\n\r\n#init colors\r\nlavender = 'lavender'\r\nblack = 'black'\r\ngrey = 'grey'\r\nwhite = 'white'\r\n\r\n\r\n#init tkinter window\r\nroot = tk.Tk()\r\nroot.title('Excel Line Graph Maker')\r\nmainwindow = tk.Canvas(root, width = 300, height = 300, bg = lavender)\r\nmainwindow.pack(fill=\"both\", expand=True)\r\n\r\n\r\n#button function\r\ndef getExcel():\r\n\tglobal datafile\r\n\r\n\tfilepath = filedialog.askopenfilename()\r\n\ttry:\r\n\t\tdatafile = pandas.read_excel(filepath)\r\n\t\tif len(datafile.columns) == 2:\r\n\t\t\tx_label = datafile.columns[0]\r\n\t\t\ty_label = datafile.columns[1]\r\n\t\t\tx = datafile[x_label].tolist()\r\n\t\t\ty = datafile[y_label].tolist()\r\n\t\t\tplt.plot(x, 
y)\r\n\t\t\tplt.title(filepath)\r\n\t\t\tplt.xlabel(x_label)\r\n\t\t\tplt.ylabel(y_label)\r\n\r\n\t\t\tplt.show()\r\n\t\telse:\r\n\t\t\tpopupwindow(\"Please select an excel file with 2 columns!\")\r\n\texcept:\r\n\t\tpopupwindow(\"Please select an excel file!\")\r\n\r\n\r\n#def errormessage\r\n\r\ndef popupwindow(message):\r\n\tpopup = tk.Tk()\r\n\tpopup.wm_title(\"Error!\")\r\n\ttextlabel = tk.Label(popup, width=30, text=message, font=('helvetica', 10, 'bold'))\r\n\ttextlabel.pack(side=\"top\", fill=\"x\", pady=10)\r\n\tokaybutton = tk.Button(popup, text=\"Okay\", command=popup.destroy)\r\n\tokaybutton.pack()\r\n\tpaddinglabel = tk.Label(popup, width=50, text=\"\")\r\n\tpaddinglabel.pack(side=\"bottom\", fill=\"x\", pady=10)\r\n\r\n\tpopup.mainloop()\r\n\r\n#build on top of window\r\nmessage_1 = tk.Label(text='Excel Data Visualiser!\\nCurrent version makes a line graph\\nof 1 column of data.', bg = lavender, fg = black, font=('helvetica', 12, 'bold'))\r\nbrowseButtonExcel = tk.Button(text = 'Import Excel File', command=getExcel, bg='grey', fg=white, font=('helvetica', 12, 'bold'))\r\nmainwindow.create_window(150, 150, window=browseButtonExcel)\r\nmainwindow.create_window(150, 50, window=message_1)\r\n\r\n\r\n#loop\r\nroot.mainloop()\r\n\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tkinter.Tk", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 20, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 28, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 55, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "184807448", "text": "from flask import Flask,render_template,redirect,request\nimport csv\napp = Flask(__name__)\nprint(__name__)\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n@app.route('/')\ndef service(page_name):\n return render_template(page_name)\n\ndef data_base(data):\n with open(\"../database.txt\", mode=\"a\") as database:\n email=data[\"email\"]\n subject=data[\"subject\"]\n message=data[\"message\"]\n 
file_write=database.write(f\"email={email} sub={subject} content={message}\\n \")\n\ndef base_csv(data):\n with open(\"store.csv\", newline='', mode=\"a\") as db2:\n email=data[\"email\"]\n subject=data[\"subject\"]\n message=data[\"message\"]\n csv_writer=csv.writer(db2,delimiter=',',quotechar='\"',quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow([email,subject,message])\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n if request.method==\"POST\":\n data=request.form.to_dict()\n base_csv(data)\n return redirect(\"/thankyou.html\")\n else:\n return \"try again\"\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 26, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "163937732", "text": "from django.db import models\nfrom .tasks import thanks_mail\n\n\nclass Subscription(models.Model):\n \"\"\"Подписка на email\"\"\"\n\n email = models.EmailField()\n date = models.DateTimeField(\"Дата подписки\", auto_now_add=True)\n\n def __str__(self):\n return self.email\n\n class Meta:\n verbose_name = \"Подписка на email\"\n verbose_name_plural = \"Подписки на email\"\n ordering = ['-date']\n\n def save(self, *args, **kwargs):\n thanks_mail.delay(self.email)\n super().save(*args, **kwargs)\n", "sub_path": "subscription/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "tasks.thanks_mail.delay", "line_number": 20, "usage_type": "call"}, {"api_name": "tasks.thanks_mail", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "251993761", "text": "\"\"\"\nLidl_NL spider created on the top of ATSSpider\n\nscrapy crawl lidl_nl -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.werkenbijlidl.nl/cps/rde/SID-07C346E0-FC06FD13/career_lidl_nl/hs.xsl/jobsearch.htm?searchquery=&lat=&lon=&locname=&hitsperchunk=10\"\n\nSeed URL:\n http://www.werkenbijlidl.nl/cps/rde/SID-07C346E0-FC06FD13/career_lidl_nl/hs.xsl/jobsearch.htm?searchquery=&lat=&lon=&locname=&hitsperchunk=10\n\"\"\"\n\nfrom scrapy.http import 
Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, Prefix, RemoveBadElements\n\n\nclass Lidl_NL(ATSSpider):\n\n name = 'lidl_nl'\n\n def parse(self, response):\n \"\"\"\n Parse each job urls via GET method.\n \"\"\"\n sel = Selector(response)\n\n if not self.expected_job_count_set:\n expected_count = sel.xpath(\n '//div[@class=\"job-search-box-top\"]//div/h2[contains(text(), \"Resultaat:\")]/text()'\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count\n\n for article in sel.xpath(\n '//section[contains(@class, \"job-search-box-middle\")]/div/article'\n ):\n href = article.xpath(\n './a[@class=\"link-vacancy\"]/@href'\n ).extract()\n if href:\n yield Request(\n callback=self.parse_job_callback(),\n url=urljoin(response.url, href[0])\n )\n\n # pagination\n next_page = sel.xpath(\n '//nav[@class=\"pagination\"]/ul/li/a[@rel=\"next\"]/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Parse all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_xpath(\n 'title',\n '//section[@class=\"apply-online-box\"]//div/h1/text()'\n )\n loader.add_xpath(\n 'date',\n '//section[@class=\"apply-online-box\"]//div/ul/li[1]/text()',\n ConvertDateString('%d.%m.%Y')\n )\n loader.add_xpath(\n 'referencenumber',\n '//table/tr/td[contains(text(), \"Referentie:\")]/following-sibling::td/text()',\n Prefix('%s-' % self.name)\n )\n loader.add_xpath(\n 'location',\n '//table/tr/td[contains(text(), \"Locatie:\")]/following-sibling::td/text()'\n )\n if not loader.get_output_value('location'):\n loader.add_xpath(\n 'location',\n '//section[@class=\"apply-online-box\"]//div/ul/li[3]/text()'\n )\n loader.add_xpath(\n 'jobtype',\n '//table/tr/td[contains(text(), \"Type contract:\")]/following-sibling::td/text()'\n )\n loader.add_xpath(\n 'description',\n '//section[@class=\"widestcontent vacancy-page\"]//div/div[@class=\"vacancy-features\"]',\n RemoveBadElements(['img'])\n )\n loader.add_value('url', response.url)\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n", "sub_path": "brightcorp/brightcorp/spiders/lidl_nl.py", "file_name": "lidl_nl.py", "file_ext": "py", "file_size_in_byte": 3412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "brightcorp.base.atsspiders.ATSSpider", "line_number": 19, "usage_type": "name"}, {"api_name": "scrapy.selector.Selector", "line_number": 27, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 43, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 45, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 53, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 55, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 62, "usage_type": "call"}, {"api_name": "brightcorp.items.BrightcorpItemLoader", "line_number": 64, "usage_type": "call"}, {"api_name": "brightcorp.processors.ConvertDateString", "line_number": 72, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 77, "usage_type": "call"}, {"api_name": "brightcorp.processors.RemoveBadElements", "line_number": 95, 
"usage_type": "call"}]} +{"seq_id": "263903826", "text": "import time\nfrom Base.base import Base\nfrom selenium.webdriver.common.by import By\nfrom Base import ReadConfig\nfrom selenium import webdriver\nfrom PageObj.ngboss.login_page import LoginPage\nfrom PageObj.ngboss.mainpage import MainPage\nfrom PageObj.order.BizCommon.PersonOrder.ElementPartBase import DealUserCommon\nfrom PageObj.order.BizCommon.PersonOrder.ElementPartBase import SelectElements\nfrom Check.PageCheck import PageAssert\nfrom Base.Mylog import LogManager\n\nrc = ReadConfig.ReadConfig(\"ngboss_config.ini\")\nlogger = LogManager('PlatServiceOrder').get_logger_and_add_handlers(1,is_add_stream_handler=True, log_path=ReadConfig.log_path, log_filename=time.strftime(\"%Y-%m-%d\")+'.log' )\n\nclass PlatServiceOrder(Base):\n '''平台业务受理'''\n def open_base(self):\n self.driver.get(rc.get_ngboss('url'))\n self.driver.maximize_window()\n #self.driver.implicitly_wait(30)\n\n def openPlatServiceOrder(self):\n LoginPage(self.driver).login(rc.get_ngboss('username'), rc.get_ngboss('password')) # 登录\n MainPage(self.driver).open_CataMenu('crm9000', 'IBS1000', 'IBS9271',menuPath='page/order.page.pc.person.plat.PlatOrder') # 进入产品变更页面\n logger.info('进入平台业务办理页面')\n\n def searchPlatSvcByOfferCode(self,OfferCode):\n '''\n 通过OfferCode查询平台服务\n :param OfferCode:平台服务编码OfferCode\n :return:\n '''\n btn_PlatOrderAdd = (By.XPATH,\"//li[contains(@ontap,'platOrderAddItem')]\")\n self.isElementDisplay(btn_PlatOrderAdd,'click')\n self.isElementDisplay((By.ID,'myplatform_tab_li_2'),'click') #点击全部\n self.sendkey((By.ID,'COND'),OfferCode) #输入平台服务编码\n btn_platSvcSearch = (By.XPATH,\"//span[contains(@ontap,'platOrderAdd.searchOffer')]\")\n self.isElementDisplay(btn_platSvcSearch,'click')\n btn_subPlatOffer = (By.XPATH,\"//span[contains(@ontap,'platOrderAdd.do_subPlatOffer')]\")\n self.isElementDisplay(btn_subPlatOffer,'click') #点击订购按钮\n SelectElements(self.driver).backAcceptPage() #点击回到受理主页面\n return self.checkPlatOrderShow(OfferCode) #返回True或者False ,\n\n def acceptSubPlatSvcByOfferCode(self,accessNum,OfferCode):\n '''\n 订购OfferCode平台服务\n :param OfferCode:平台服务编码OfferCode\n :return:\n '''\n title = '测试号码_%s订购平台服务测试记录%s' % (accessNum,OfferCode)\n self.add_dochead(title)\n self.openPlatServiceOrder()\n self.screen_step(\"进入平台业务办理菜单\")\n DealUserCommon(self.driver).AuthUserInfo(accessNum) #用户鉴权\n self.screen_step(\"查看平台服务订购列表\")\n isSuc = self.searchPlatSvcByOfferCode(OfferCode)\n if not isSuc:\n logger.info('平台服务%s未订购成功'.format(OfferCode))\n PageCommonPart(self.driver).submit() #点击提交\n submitMsg = PageAssert(self.driver).assert_Submit()\n logger.info('业务受理信息:{}'.format(submitMsg))\n self.screen_step('点击提交,受理信息:{}'.format(submitMsg))\n self.save_docreport(title)\n\n\n\n\n\n\n\n\n\n\n def checkPlatOrderShow(self,OfferCode):\n '''\n 检查选择订购的平台服务是否展示在主页面\n :param OfferCode: 平台服务编码\n :return:返回True或者False,如果在订购列表则返回True,否则返回False\n '''\n platOrderShowStr = \"//div[contains(@value,'%s') and contains(@ontap,'platOrderShow.unsubPlatform')]\" %OfferCode\n flag = self.isElementDisplay(platOrderShowStr)\n return flag\n\n\n\n\n\n\n\n\n", "sub_path": "PageObj/order/person/PlatSvcOrder.py", "file_name": "PlatSvcOrder.py", "file_ext": "py", "file_size_in_byte": 3690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "Base.ReadConfig.ReadConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "Base.ReadConfig", "line_number": 13, "usage_type": "name"}, {"api_name": "Base.Mylog.LogManager", 
"line_number": 14, "usage_type": "call"}, {"api_name": "Base.ReadConfig.log_path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "Base.ReadConfig", "line_number": 14, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 14, "usage_type": "call"}, {"api_name": "Base.base.Base", "line_number": 16, "usage_type": "name"}, {"api_name": "PageObj.ngboss.login_page.LoginPage", "line_number": 24, "usage_type": "call"}, {"api_name": "PageObj.ngboss.mainpage.MainPage", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 40, "usage_type": "name"}, {"api_name": "PageObj.order.BizCommon.PersonOrder.ElementPartBase.SelectElements", "line_number": 42, "usage_type": "call"}, {"api_name": "PageObj.order.BizCommon.PersonOrder.ElementPartBase.DealUserCommon", "line_number": 55, "usage_type": "call"}, {"api_name": "Check.PageCheck.PageAssert", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "581724395", "text": "# Author: Hamidreza Nademi\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_data(inputs, labels, sample_icon, sample_color, title):\n \"\"\" Plot dataset \"\"\"\n x0, x1 = zip(*inputs)\n uniqe_lbls, dict_data = set(labels), {}\n\n for label in uniqe_lbls:\n dict_data[label] = []\n\n for i in range(len(labels)):\n dict_data[labels[i]].append(inputs[i])\n\n for label, color in zip(dict_data.keys(), sample_color):\n x0, x1 = zip(*dict_data[label])\n plt.plot(x0, x1, sample_icon, color=color, label=f'class {int(label)}')\n\n plt.xlabel('x0')\n plt.ylabel('x1')\n plt.title(title)\n plt.legend()\n\n\ndef plot_cluster(clusters, means, colors, sample_icon, title):\n for cluster, mean, color in zip(clusters, means, colors):\n x0, x1 = zip(*cluster)\n plt.plot(x0, x1, sample_icon, color=color)\n plt.plot(mean[0], mean[1], marker='X', color='BLACK')\n plt.xlabel('x0')\n plt.ylabel('x1')\n plt.title(title)\n\n\ndef pre_process(dataset_path):\n inputs = []\n dataset = np.loadtxt(dataset_path, delimiter=',')\n x0, x1, labels = zip(*dataset)\n\n for i in range(len(x0)):\n inputs.extend([[x0[i], x1[i]]])\n\n return inputs, labels\n", "sub_path": "Machine Learning/Homework4-Clustering/Code/utililty.py", "file_name": "utililty.py", "file_ext": "py", "file_size_in_byte": 1214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "146908040", "text": "#Autor: Ricardo Cornejo Lozano\r\n#Programa que contiene 7 programas para escoger. Entre este programa podras dibujar figuras geometricas, aproximarPi, imprimir piramides de numeros y calcular cuantos numeros de pueden dividir entre 17.\r\nimport random\r\nfrom math import*\r\nimport math\r\nimport pygame\r\n\r\nANCHO = 800\r\nALTO = 800\r\n# Colores\r\nBLANCO = (255, 255, 255) # R,G,B en el rango [0,255]\r\nVERDE_BANDERA = (0, 122, 0)\r\nROJO = (255, 0, 0)\r\nNEGRO =(0,0,0)\r\n\r\n\r\ndef CirculosYcuadros():\r\n # Inicializa el motor de pygame\r\n pygame.init()\r\n ventana = pygame.display.set_mode((ANCHO, ALTO)) # Crea la ventana de dibujo\r\n reloj = pygame.time.Clock() # Para limitar los fps\r\n termina = False # Bandera para saber si termina la ejecución\r\n\r\n while not termina: # Ciclo principal\r\n # Procesa los eventos que recibe el programa\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT: # El usuario hizo click en el botón de salir\r\n termina = True\r\n\r\n # Borrar pantalla\r\n ventana.fill(BLANCO)\r\n\r\n # Dibujar, aquí haces todos los trazos que requieras\r\n\r\n # Normalmente llamas a otra función y le pasas -ventana- como parámetro, por ejemplo, dibujarLineas(ventana)\r\n for radio in range(1, ANCHO, 10):\r\n pygame.draw.circle(ventana, NEGRO, (ANCHO//2, ALTO//2), radio , 1)\r\n for width in range (1, ANCHO,10):\r\n pygame.draw.rect(ventana, NEGRO, (ANCHO//2,ALTO//2, width,width), 1)\r\n pygame.draw.rect(ventana, NEGRO, (ANCHO // 2, ALTO // 2, -width, -width), 1)\r\n pygame.draw.rect(ventana, NEGRO, (ANCHO // 2, ALTO // 2, width,-width), 1)\r\n pygame.draw.rect(ventana, NEGRO, (ANCHO // 2, ALTO // 2, -width, width), 1)\r\n\r\n\r\n pygame.display.flip() # Actualiza trazos\r\n reloj.tick(40) # 40 fps\r\n\r\n # Después del ciclo principal\r\n pygame.quit() # termina pygame\r\n\r\n\r\n\r\n\r\ndef dibujarEspiral():\r\n import turtle\r\n for longitud in range(5, 800, 10):\r\n turtle.forward(longitud)\r\n turtle.left(90)\r\n turtle.forward(longitud)\r\n turtle.left(90)\r\n\r\ndef dibujarCirculo(ventana, m):\r\n\r\n for alfa in range (0, 360,15): #grados\r\n Rad = radians(alfa)\r\n r = 
m * cos(Rad)\r\n x = int(r*cos(Rad)) + ANCHO//3\r\n y = ALTO//2 +int(r*sin(Rad))\r\n colorAzar = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\r\n pygame.draw.circle(ventana, colorAzar, (x,y), 150,1)\r\n\r\n\r\ndef dibujar():\r\n # Inicializa el motor de pygame\r\n pygame.init()\r\n ventana = pygame.display.set_mode((ANCHO, ALTO)) # Crea la ventana de dibujo\r\n reloj = pygame.time.Clock() # Para limitar los fps\r\n termina = False # Bandera para saber si termina la ejecución\r\n\r\n while not termina: # Ciclo principal\r\n # Procesa los eventos que recibe el programa\r\n for evento in pygame.event.get():\r\n if evento.type == pygame.QUIT: # El usuario hizo click en el botón de salir\r\n termina = True\r\n\r\n # Borrar pantalla\r\n ventana.fill(NEGRO)\r\n\r\n # Dibujar, aquí haces todos los trazos que requieras\r\n dibujarCirculo(ventana, 300)\r\n\r\n\r\n pygame.display.flip() # Actualiza trazos\r\n reloj.tick(40) # 40 fps\r\n\r\n # Después del ciclo principal\r\n pygame.quit() # termina pygame\r\n\r\n\r\ndef aproximarPI(n):\r\n suma = 0 #sumatoria de las fracciones\r\n cf =1\r\n for d in range (1, n+1,2):\r\n if cf%2 ==1:\r\n suma += 1/d\r\n else:\r\n suma -= 1/d\r\n cf += 1\r\n print (4*suma)\r\n\r\n\r\ndef calcularDiv():\r\n count = 0\r\n for x in range (1000,9999):\r\n numero = x%17\r\n if numero == 0:\r\n count += 1\r\n\r\n resultado = count\r\n print(resultado)\r\n\r\ndef piramideDeUnos():\r\n for i in range(10):\r\n X = 1\r\n for j in range(i):\r\n X = X * 10 + 1\r\n print (\"%d x %d = %d\" % (X,X,X*X))\r\n\r\ndef piramideDe1a9():\r\n n=1\r\n mult = 8\r\n print (\"%d x %d + %d = %d\" % (n,mult,n,n*mult+n))\r\n\r\n for c in range(2,10):\r\n r = (str(n)+str(c))\r\n n = r\r\n resultado = (int(n)*8+int(c))\r\n print(\"%d x %d + %d = %d\" % (int(n),8,int(c),int(resultado)))\r\n\r\ndef main():\r\n print(\"Tarea 05. 
Seleccione que quiere hacer.\", \"\\n1.Dibujar cuadros y circulos\",\"\\n2.DibujarParabola\", \"\\n3.Dibujar espiral\", \"\\n4.Dibujar Circulos\", \"\\n5.Aproximar PI\", \"\\n6.Contar divisibles entre 17\", \"\\n7.Imprimir piramide de unos\", \"\\n8.Imprimir piramide de 1-9\",\"\\n0.Salir\")\r\n seleccion= int(input(\"Que desea hacer?: \"))\r\n if seleccion == 1:\r\n CirculosYcuadros()\r\n elif seleccion ==2:\r\n dibujarParabola()\r\n elif seleccion == 3:\r\n dibujarEspiral()\r\n elif seleccion ==4:\r\n dibujar()\r\n elif seleccion == 5:\r\n aproximarPI(100)\r\n elif seleccion ==6:\r\n calcularDiv()\r\n elif seleccion == 7:\r\n piramideDeUnos()\r\n elif seleccion ==8:\r\n piramideDe1a9()\r\n elif seleccion == 0:\r\n exit()\r\n\r\n\r\n\r\nmain()", "sub_path": "TAREA005.py", "file_name": "TAREA005.py", "file_ext": "py", "file_size_in_byte": 5132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pygame.init", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 49, "usage_type": "call"}, {"api_name": "turtle.forward", "line_number": 57, "usage_type": "call"}, {"api_name": "turtle.left", "line_number": 58, "usage_type": "call"}, {"api_name": "turtle.forward", "line_number": 59, "usage_type": "call"}, {"api_name": "turtle.left", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.event", 
"line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "559405834", "text": "import os\nimport sys\nimport json\nimport random\nimport asyncio\nimport aiohttp\nimport discord\nimport jishaku\nimport requests\nimport randomstuff\nfrom discord import *\nfrom discord import message\nfrom random import choice\nfrom datetime import datetime\nfrom discord.ext import commands\nfrom asyncio import TimeoutError\nfrom discord.ext.commands.core import command\n\nclass Reddit(commands.Cog, name= \"Reddit\"): #another bad aiohttp request to eat your ram\n\n def __init__(self,bot):\n self.bot = bot\n\n @commands.command()\n async def meme(self,ctx):\n memepage = [\n \"https://www.reddit.com/r/memes/hot.json\",\n \"https://www.reddit.com/r/dankmemes/hot.json\",\n \"https://www.reddit.com/r/meme/hot.json\"\n ]\n\n\n async with aiohttp.ClientSession() as cs:\n async with cs.get(f\"{random.choice(memepage)}\") as r:\n res = await r.json()\n rm = random.randint(1,25)\n ml = res['data']['children'] [rm]['data']['permalink']\n title = res['data']['children'] [rm]['data']['title']\n ups = res['data']['children'] [rm]['data']['ups']\n com = res['data']['children'] [rm]['data']['num_comments']\n nsfw = res['data']['children'] [rm]['data'][\"over_18\"]\n subr = res['data']['children'] [rm]['data'][\"subreddit_name_prefixed\"]\n\n if nsfw == False:\n em = discord.Embed(title = f'{title}' , description = f\"**[Reddit link](http://www.reddit.com{ml})**\" , color = ctx.author.color)\n em.set_image(url = res['data']['children'] [rm] ['data']['url'])\n em.set_footer(text = f'⬆️{ups} | 💬{com} | {subr}')\n\n await ctx.send(embed = em)\n else:\n pass\n\n @commands.command()\n async def joke(self ,ctx):\n jokepage = [\n \"https://www.reddit.com/r/joke/hot.json\",\n \"https://www.reddit.com/r/jokesmemes/hot.json\"\n ]\n\n async with aiohttp.ClientSession() as cs:\n async with cs.get(f\"{random.choice(jokepage)}\") as r:\n res = await r.json()\n random_joke = random.randint(1,10)\n jl = res['data']['children'] [random_joke]['data']['permalink']\n title = res['data']['children'] [random_joke]['data']['title']\n text = res['data']['children'] [random_joke]['data'][\"selftext\"]\n ups = res['data']['children'] [random_joke]['data']['ups']\n com = res['data']['children'] [random_joke]['data']['num_comments']\n nsfw = res['data']['children'] [random_joke]['data'][\"over_18\"]\n\n if nsfw == False:\n em = discord.Embed(title = '' ,description=f\"**[{title}](http://www.reddit.com{jl})**\" , color = ctx.author.color)\n em.add_field(name= f\"{text}\" , value = '_ _', inline = False)\n em.set_footer(text = f'⬆️{ups} | 💬{com}')\n\n await ctx.send(embed=em)\n\n else:\n pass\n\ndef setup(bot):\n bot.add_cog(Reddit(bot))\n print(\"Reddit cog is loaded\")", "sub_path": "cogs/reddit.py", "file_name": "reddit.py", "file_ext": "py", "file_size_in_byte": 3273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 19, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 19, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 33, "usage_type": "call"}, {"api_name": "random.choice", 
"line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 45, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 24, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 24, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 60, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 61, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 72, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 53, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "327905389", "text": "import gym\nfrom gym import spaces\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\"\"\"\nActions: 0 = sell, 1 = hold, 2 = buy\n\"\"\"\n\nclass BitcoinTradingEnv(gym.Env):\n\n def __init__(self, df,\n start_date,\n html_save_path=\"result.html\",\n initial_balance=1000,\n debug=False,\n start_idx=1000,\n steps=2000):\n\n self.df = df.copy()\n self.df = self.df[self.df.Date > start_date]\n self.df = self.df.iloc[:start_idx+steps]\n self.html_save_path = html_save_path\n self.df = self.df.dropna().reset_index()\n self.df = self.df.sort_values('Date')\n self.df[\"net_worth\"] = None\n self.df[\"action\"] = None\n self.df[\"reward\"] = None\n\n if debug:\n self.df = self.df.iloc[:start_idx+1000]\n\n self.initial_balance = initial_balance\n self.holding = False\n self.current_step = start_idx\n self.start_idx = start_idx\n\n def _reset_session(self):\n self.current_step = self.start_idx\n self.steps_left = len(self.df) - self.current_step\n self.net_worth = self.initial_balance\n\n def reset(self):\n self.holding = False\n self._reset_session()\n return self._next_observation()\n\n def _next_observation(self):\n return self.df.loc[:self.current_step]\n\n def _get_current_price(self):\n return self.df['Close'].values[self.current_step]\n\n def update_net_worth(self, current_price):\n diff = (current_price - self.previous_price)\n gain_prc = diff / self.previous_price\n self.net_worth = self.net_worth * (1 + gain_prc)\n\n def _take_action(self, action, current_price):\n action_type = action\n\n if self.holding:\n self.update_net_worth(current_price)\n\n self.previous_price = current_price\n\n if action_type == 2:\n self.holding = True # We are holding\n\n elif action_type == 0:\n self.holding = False # We sold everything :)\n\n\n def step(self, action):\n current_price = self._get_current_price()\n prev_net_worth = self.net_worth\n\n self._take_action(action, current_price)\n reward = self.net_worth - prev_net_worth\n\n self.df.loc[self.current_step, 'action'] = action\n self.df.loc[self.current_step, 'reward'] = reward\n self.df.loc[self.current_step, 'net_worth'] = self.net_worth\n\n self.steps_left -= 1\n self.current_step += 1\n\n obs = self._next_observation()\n if self.net_worth <= 0:\n print(\"Net worth < 0 => Done\")\n done = True\n elif self.steps_left == 0:\n print(\"Steps left == 0 => Done\")\n done = True\n else:\n done = False\n\n return obs, reward, done, {}\n\n\n # def render(self, mode='human', **kwargs):\n # from .simple_renderer import plotly_render\n # plotly_render(self.df, self.html_save_path, self.time_plot_min)\n", "sub_path": "env/bitcoin_simple_env.py", "file_name": "bitcoin_simple_env.py", "file_ext": "py", 
"file_size_in_byte": 3025, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "gym.Env", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "607133929", "text": "# =============================================================================\n#\n# EZID :: noid_nog.py\n#\n# Interface to the \"nog\" (nice opaque generator) portion of noid.\n# Because EZID interacts with multiple nog minters, the interface is\n# expressed as a class.\n#\n# Author:\n# Greg Janee \n#\n# License:\n# Copyright (c) 2014, Regents of the University of California\n# http://creativecommons.org/licenses/BSD/\n#\n# -----------------------------------------------------------------------------\n\nimport base64\nimport threading\nimport time\nimport urllib2\n\nimport config\n\nimport logging\nfrom log import stacklog\n\n_LT = logging.getLogger(\"tracer\")\n\n\n_lock = threading.Lock()\n_minterServers = None\n_numAttempts = None\n_reattemptDelay = None\n_minters = None\n_cacheSize = None\n\n\ndef loadConfig():\n global _minterServers, _numAttempts, _reattemptDelay, _minters, _cacheSize\n d = {}\n for ms in config.get(\"shoulders.minter_servers\").split(\",\"):\n p = \"minter_server_\" + ms\n d[config.get(p + \".url\")] = \"Basic \" + base64.b64encode(\n config.get(p + \".username\") + \":\" + config.get(p + \".password\")\n )\n _minterServers = d\n _numAttempts = int(config.get(\"shoulders.minter_num_attempts\"))\n _reattemptDelay = int(config.get(\"shoulders.minter_reattempt_delay\"))\n _lock.acquire()\n try:\n _minters = {}\n finally:\n _lock.release()\n _cacheSize = int(config.get(\"shoulders.minter_cache_size\"))\n\n\ndef _addAuthorization(request):\n d = _minterServers\n for ms in d:\n if request.get_full_url().startswith(ms):\n request.add_header(\"Authorization\", d[ms])\n break\n\n\nclass Minter(object):\n \"\"\"\n A minter for a specific shoulder.\n \"\"\"\n\n def __init__(self, url):\n \"\"\"\n Creates an interface to the noid nog minter at the supplied URL.\n \"\"\"\n self.url = url\n self.cache = []\n self.lock = threading.Lock()\n\n @stacklog\n def mintIdentifier(self):\n \"\"\"\n Mints and returns a scheme-less ARK identifier, e.g.,\n \"13030/fk35717n0h\". 
Raises an exception on error.\n \"\"\"\n self.lock.acquire()\n try:\n cs = _cacheSize\n if len(self.cache) == 0:\n r = urllib2.Request(\"%s?mint%%20%d\" % (self.url, cs))\n _addAuthorization(r)\n for i in range(_numAttempts):\n c = None\n try:\n c = urllib2.urlopen(r)\n s = c.readlines()\n except:\n if i == _numAttempts - 1:\n raise\n else:\n break\n finally:\n if c:\n c.close()\n time.sleep(_reattemptDelay)\n assert (\n len(s) >= cs + 1\n and all(l.startswith(\"id:\") or l.startswith(\"s:\") for l in s[:cs])\n and s[-2] == \"nog-status: 0\\n\"\n ), \"unexpected return from minter, output follows\\n\" + \"\".join(s)\n self.cache = [l.split(\":\")[1].strip() for l in s[:cs]]\n id = self.cache[0]\n self.cache = self.cache[1:]\n return id\n finally:\n self.lock.release()\n\n def ping(self):\n \"\"\"\n Tests the minter, returning \"up\" or \"down\".\n \"\"\"\n try:\n r = urllib2.Request(self.url)\n _addAuthorization(r)\n c = None\n try:\n c = urllib2.urlopen(r)\n s = c.readlines()\n finally:\n if c:\n c.close()\n assert len(s) >= 2 and s[-2] == \"nog-status: 0\\n\"\n return \"up\"\n except Exception:\n return \"down\"\n\n\ndef getMinter(url):\n \"\"\"\n Returns a Minter object for a noid nog minter at the supplied URL.\n \"\"\"\n _lock.acquire()\n try:\n if url not in _minters:\n _minters[url] = Minter(url)\n return _minters[url]\n finally:\n _lock.release()\n", "sub_path": "impl/noid_nog.py", "file_name": "noid_nog.py", "file_ext": "py", "file_size_in_byte": 4100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 28, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 31, "usage_type": "call"}, {"api_name": "config.get", "line_number": 42, "usage_type": "call"}, {"api_name": "config.get", "line_number": 44, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 44, "usage_type": "call"}, {"api_name": "config.get", "line_number": 45, "usage_type": "call"}, {"api_name": "config.get", "line_number": 48, "usage_type": "call"}, {"api_name": "config.get", "line_number": 49, "usage_type": "call"}, {"api_name": "config.get", "line_number": 55, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 77, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 89, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "log.stacklog", "line_number": 79, "usage_type": "name"}, {"api_name": "urllib2.Request", "line_number": 122, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "293562619", "text": "# 2019_8_19. Rewrite of prior CIPRS scraper. This should take CIPRS files\n# after converting from pdf to text and concatenating together (separate\n# program using preexisting libraries). It should run through them and get\n# the data scraped/parsed/munged/etc. It should return vcf (VCARD/Contacts)\n# and csv data as output.\n##----------------------------------------------------------------------------\n# SPECIFIC PLAN:\n##----------------------------------------------------------------------------\n\n## Step 1:\n## Function to collect the text from the pdfs.\n##\n## Step 2:\n## Function should break the CIPRS into chunks. 
Each chunk starts at \n## \"Case Summary for Court Case\" and continues through to \"This document was\n## generated using\"\n##\n## Step 3\n## For each chunk, use regular expressions (a function for each, all\n## run and called within a single function) to gather a dictionary with\n## these values: {\n## 'client', \n## 'case number', \n## 'charge counts'\n## 'county',\n## 'offense date', \n## 'address', \n## }\n##\n## Step 5:\n## Store all the dictionaries in a list.\n##\n## Step 6:\n## For each dictionary in the list of dictionaries, run the vcard writer.\n## Also, run a simple csv-writer. \n##----------------------------------------------------------------------------\n##----------------------------------------------------------------------------\ndef acs_ciprs_textcollector():\n \"\"\"\n This should take the text from the ciprs files (after another program has\n scraped the pdfs). It should return the text for the next functions.\n \"\"\"\n with open ('client_411.txt') as fo_411:\n l_ciprslines = fo_411.readlines()\n\n return l_ciprslines\n##----------------------------------------------------------------------------\ndef acs_ciprs_chunkindexer(l_ciprslines):\n \"\"\"\n Function should break the CIPRS into chunks. Each chunk starts at \"Case\n Summary for Court Case\" and continues through to \"This document was\n generated using\"\n \"\"\"\n x = 0\n l_xvalues = []\n s = \"Case Summary for Court Case\"\n for line in l_ciprslines:\n if s.lower() in line.lower():\n l_xvalues.append(x)\n x += 1\n else:\n x += 1\n\n y = 0\n l_yvalues = []\n s_2 = \"This document was generated using\"\n for line2 in l_ciprslines:\n if s_2.lower() in line2.lower():\n l_yvalues.append(y)\n y += 1\n else:\n y += 1\n\n l_allchunks = [] \n if len(l_xvalues) == len(l_yvalues):\n\n z = 0\n for x in l_xvalues:\n y = l_yvalues[z]\n l_thischunk = l_ciprslines[x:y]\n s_thischunk = ''.join(l_thischunk)\n z += 1\n l_allchunks.append(s_thischunk)\n \n return l_allchunks\n\n else:\n print (\"The data in 'client_411.txt' appears to be incomplete!\")\n##----------------------------------------------------------------------------\ndef acs_regex_clientgrabber(s_thischunk):\n \"\"\"\n This should pick out the client name from l_thischunk and return it.\n \"\"\"\n\n import re\n text_toregex = s_thischunk\n \n#Defendant: CORUM,KEVIN,WAYNE Case Information\n pattern = r'Defendant: [A-Z]{2,20}(([\\.]?[\\,]?[\\s]?){1,3}?)'\n pattern += r'[A-Z]{2,20}(([\\.]?[\\,]?[\\s]?){1,3}?)[A-Z]{2,20}?'\n\n client = re.search(pattern, text_toregex)\n\n if client != None:\n client = client.group()\n client = client.replace(\"Defendant: \", '')\n client = client.replace(\"Case Information\", '')\n if ',' in client:\n splitclient = client.split(',')\n client = splitclient[1] + ' ' + splitclient[0]\n client = client.title()\n else:\n client = client.title()\n else:\n client = ''\n\n return client\n\n##----------------------------------------------------------------------------\n\ndef acs_regex_casenumber(s_thischunk):\n \"\"\"\n This is just the function to regex the 'l_thischunk' list of lines and\n collect the case number via a regular expression. 
This function should be\n called within a single function calling all the regex functions and creating\n a dictionary for each 'chunk' (ie: each individual CIPRS text)\n \"\"\"\n import re\n\n text_toregex = s_thischunk\n l_allcasenumbs = []\n pattern = r'[0-9]{2}[\\s]?[CRSIFTVDcrsiftvd]{1,3}[\\s]?[0-9]{2,9}'\n casenumb = re.search(pattern, text_toregex)\n\n if casenumb != None:\n casenumb = casenumb.group()\n else:\n casenumb = ''\n\n return casenumb\n##----------------------------------------------------------------------------\ndef acs_regex_pendingchargecounts(s_thischunk): ##This should work for cases that\n ##have not yet been resolved/still pending\n \"\"\"\n This should pick out the charge counts from l_thischunk and return\n a list of all the charge counts.\n \"\"\"\n import re\n\n text_toregex = s_thischunk\n l_allchargecounts = []\n \n pattern = r'([A-Z]{1,19})(([\\s,/-]?){1,3}?)([A-Z]{1,19})(([\\s,/-]?){1,3}?)'\n pattern += r'([A-Z]{1,19})(([\\s,/-]?){1,3}?)([A-Z]{1,19})(([\\s,/-]?){1,3}?)'\n pattern += r'([A-Z]{1,19})(([\\s,/-]?){1,3}?)Verdict:'\n charge = re.findall(pattern, text_toregex)\n \n if charge != None:\n for c in charge:\n if len (c) > 0:\n d = ''.join(c)\n d = d.rstrip()\n d = d.replace('\\n', '')\n d = d.replace('Verdict:', '')\n l_allchargecounts.append(d)\n else:\n c = ''\n else:\n pass\n\n return l_allchargecounts\n##----------------------------------------------------------------------------\ndef acs_regex_countygetter(s_thischunk):\n \"\"\"\n This should pick out the county from l_thischunk and return it.\n \"\"\"\n import re\n\n text_toregex = s_thischunk\n\n pattern = r'Summary for Court Case: [A-Z]{2,20}'\n county = re.search(pattern, text_toregex)\n\n if county != None:\n county = county.group()\n county = county.replace(\"Summary for Court Case: \", '')\n county = county.title()\n else:\n county = ''\n\n return county\n##----------------------------------------------------------------------------\ndef acs_regex_offensedater(s_thischunk):\n \"\"\"\n This should pick out the offense date from l_thischunk and return it.\n \"\"\"\n import re\n\n text_toregex = s_thischunk\n\n pattern = r'Offense Date/Time: [0-9]{2}/[0-9]{2}/[0-9]{4}'\n offensedate = re.search(pattern, text_toregex)\n\n if offensedate != None:\n offensedate = offensedate.group()\n offensedate = offensedate.replace(\"Offense Date/Time: \", '')\n else:\n offensedate = ''\n\n return offensedate\n##----------------------------------------------------------------------------\ndef acs_regex_addresser(s_thischunk):\n \"\"\"\n This should pick out the address from l_thischunk and return it.\n \"\"\"\n import re\n\n text_toregex = s_thischunk\n\n pattern = r'Address: [0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{1,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{2,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{2,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{2,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{2,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n pattern += r'[0-9A-Z]{2,19}(([\\s]?)([\\,]?)([\\s]?)([\\,]?))'\n address = re.search(pattern, text_toregex)\n\n if address != None:\n address = 
address.group()\n address = address.replace(',', '')\n address = address.replace(\"Address: \", '')\n address = address.replace('\\n', ' ')\n else:\n address = ''\n\n return address\n##----------------------------------------------------------------------------\ndef acs_regexer(s_thischunk):\n \"\"\"\n This function should run the regex functions on l_thischunk and give an\n output of the values in a dictionary as its return.\n \"\"\"\n import re\n d_regexoutput = {}\n \n client = acs_regex_clientgrabber(s_thischunk)\n casenumb = acs_regex_casenumber(s_thischunk)\n l_allchargecounts = acs_regex_pendingchargecounts(s_thischunk)\n county = acs_regex_countygetter(s_thischunk)\n offensedate = acs_regex_offensedater(s_thischunk)\n address = acs_regex_addresser(s_thischunk)\n\n d_regexoutput['client'] = client\n d_regexoutput['casenumb'] = casenumb\n d_regexoutput['l_allchargecounts'] = l_allchargecounts\n d_regexoutput['county'] = county\n d_regexoutput['offensedate'] = offensedate\n d_regexoutput['address'] = address\n\n return d_regexoutput \n\n##----------------------------------------------------------------------------\n##----------------------------------------------------------------------------\n## FUNCTION CALLS:\n##----------------------------------------------------------------------------\n##-returns all the lines from 'client411.txt' as a list object----------------\nl_ciprslines = acs_ciprs_textcollector()\n\n##---Takes chunks representing each individual file and adds them to a list--\nl_allchunks = acs_ciprs_chunkindexer(l_ciprslines) #\n\n##---Running through files' chunks one-at-a-time------------------------------\nl_allacsoutput = [] # a list for storing the dictionary returns from scraping\nfor s_thischunk in l_allchunks:\n d_regexoutput = acs_regexer(s_thischunk)\n l_allacsoutput.append(d_regexoutput)\n##-----------------Create/reset a csv and rest:\nwith open ('acs_output.csv', 'w+') as fo_acso:\n s = 'Client:' + ',' + 'Case Number:' + ',' + 'Count 1:' + ',' + 'Count 2:'\n s += ',' + 'Count 3:' + ',' + 'Offense Date:' + ',' + 'County:' + ',' \n s += 'Address:'\n fo_acso.write(s + '\\n')\nimport time\nprint (\"Resetting/establishing a file.\")\ntime.sleep(2.5)\n\n##----------------------------------------------------------------------------\n##---Next, run through the list of dictionaries, each having the values for\n##---you to input into your vcard writer function\n##----------------------------------------------------------------------------\nl_csvoutput = []\nfor d in l_allacsoutput:\n client = d['client']\n casenumb = d['casenumb'] \n l_allchargecounts = d['l_allchargecounts'] \n county = d['county'] \n offensedate = d['offensedate'] \n address = d['address']\n\n count1 = ''\n count2 = ''\n count3 = ''\n\n\n if l_allchargecounts != []:\n c = 0\n for count in l_allchargecounts:\n if c == 0:\n count1 = count\n c += 1\n elif c == 1:\n count2 = count\n elif c == 2:\n count3 = count\n else:\n pass\n \n csv_output = client + ',' + casenumb + ',' +count1 + ',' + count2 + ','\n csv_output += count3 + ',' + offensedate + ',' + county + ',' + address\n l_csvoutput.append(csv_output)\n\n with open ('acs_output.csv', 'a') as fo_acso:\n fo_acso.write(csv_output)\n fo_acso.write('\\n')\n\n##---------------- This is where we call the code for vcard writer:\n##---------------- It should output VCARDs (.vcf files)\n\ndef acs_vcardwriter(l_csvoutput):\n \"\"\"\n This code is borrowed from my prior code. 
Change as needed!\n This function should start with a template VCARD and write an output\n file for each client.\n \n The VCARD template should be in the same directory\n as this '.py' file.\n \"\"\"\n \n x = 0\n for line in l_csvoutput:\n s_x = str(x)\n class D():\n pass\n linesplit = line.split(',')\n D.clientfullname = linesplit[0]\n D.clientpathname = D.clientfullname.replace(' ', '_')\n namesplit = D.clientfullname.split()\n D.clientlast = namesplit[1]\n D.clientfirst = namesplit[0]\n D.address = linesplit[7]\n D.case = linesplit[1]\n\n with open ('vcard_template.vcf', 'r') as fo_vc:\n vclines = fo_vc.readlines()\n\n D.fullpath = s_x + D.clientpathname + '.vcf'\n with open (D.fullpath,'w+') as fo_fp:\n ##--------------------------\n fo_fp.write(vclines[0])\n ##--------------------------\n fo_fp.write(vclines[1])\n ##--------------------------\n fo_fp.write(vclines[2])\n ##--------------------------\n namefield = vclines[3]\n namefield = namefield.replace('Basicfirst', D.clientfirst)\n namefield = namefield.replace('Basiclast',D.clientlast)\n fo_fp.write(namefield)\n ##--------------------------\n fnfield = vclines[4]\n fnfield = fnfield.replace('Basicfirst', D.clientfirst)\n fnfield = fnfield.replace('Basiclast',D.clientlast)\n fo_fp.write(fnfield)\n ##--------------------------\n orgfield = vclines[5]\n fo_fp.write(orgfield)\n ##--------------------------\n emlfield = vclines[6]\n fo_fp.write(emlfield)\n ##--------------------------\n telfield = vclines[7]\n fo_fp.write(telfield)\n ##--------------------------\n address = D.address\n adrsplit = address.split()\n adrcity = adrsplit[-3]\n adrstate = adrsplit[-2]\n adrzip = adrsplit[-1]\n del adrsplit[-1]\n del adrsplit[-1]\n del adrsplit[-1]\n adrhome = ' '.join(adrsplit)\n adrfield = vclines[8]\n adrfield = adrfield.replace('homeaddress',adrhome)\n adrfield = adrfield.replace('homecity',adrcity)\n adrfield = adrfield.replace('NC',adrstate)\n adrfield = adrfield.replace('zip',adrzip)\n fo_fp.write(adrfield)\n ##--------------------------\n notefield = vclines[9]\n notefield = notefield.replace(\"''\",D.case)\n fo_fp.write(notefield)\n bdayfield = vclines[10]\n fo_fp.write(bdayfield)\n ################# UID code\n uidfield = vclines[11]\n import datetime #code for datestamp. 
Reusable, but current\n ## purpose is for part of a unique UID\n dt = '{date:%Y%m%d}'.format( date=datetime.datetime.now())\n time = '{date:%H%M%S}'.format( date=datetime.datetime.now())\n dtstamp = str(dt) + \"T\" + str(time) + \"Z\"\n ## So now dtstamp is just the current time/date\n i_x = str(x) # this should give us a unique string from the\n ## index of the 'line' (from personal csv), allowing us to\n ## prepend it to the UID and know it's unique\n v = D.clientfirst\n v += \"_\"\n v += D.clientlast\n v += '_' + i_x + dtstamp\n uidfield = uidfield.replace('##', v)\n fo_fp.write(uidfield)\n #################\n s = vclines[-1]\n fo_fp.write(s)\n x += 1\n \n########################################################################\n## FUNCTION CALLS:\n########################################################################\nacs_vcardwriter(l_csvoutput) \n", "sub_path": "adze_ciprs_scraper.py", "file_name": "adze_ciprs_scraper.py", "file_ext": "py", "file_size_in_byte": 15514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.search", "line_number": 102, "usage_type": "call"}, {"api_name": "re.search", "line_number": 133, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 156, "usage_type": "call"}, {"api_name": "re.search", "line_number": 182, "usage_type": "call"}, {"api_name": "re.search", "line_number": 202, "usage_type": "call"}, {"api_name": "re.search", "line_number": 232, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 415, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 415, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 416, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 416, "usage_type": "attribute"}]} +{"seq_id": "126465188", "text": "from flask import Flask, render_template, request\nimport sqlite3\nimport socket\nimport pandas as pd\nimport datetime\n\napp = Flask(__name__)\n\n\n# conn = sqlite3.connect('csc455_HW3.db')\n#\n# with open('bincom_test.sql', 'r') as sql_file:\n# conn.executescript(sql_file.read())\n#\n# conn.close()\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n\n@app.route(\"/one/\")\ndef quest_1():\n conn = sqlite3.connect(\"csc455_HW3.db\")\n cursor = conn.cursor()\n poll_unit = cursor.execute(\"SELECT * FROM announced_pu_results WHERE polling_unit_uniqueid = '19'\")\n\n # print(f\"{row[2]} = {row[3]}\")\n return render_template(\"q1.html\", polls=poll_unit)\n\n\n@app.route(\"/two/\", methods=['GET', 'POST'])\ndef quest_2():\n show = False\n if request.method == \"POST\":\n note = \"\"\n show = True\n conn = sqlite3.connect(\"csc455_HW3.db\")\n cursor = conn.cursor()\n # print(cursor)\n\n data = request.form\n lga_num = int(data['lga'])\n # print(type(lga_num))\n # print(lga_num)\n for row in cursor.execute(f\"SELECT * FROM polling_unit WHERE lga_id = {lga_num}\"):\n # print(row)\n polls = cursor.execute(f\"SELECT * FROM announced_pu_results WHERE polling_unit_uniqueid = {str(row[1])}\")\n try:\n # print(polls.fetchone()[0])\n check = polls.fetchone()[0]\n except TypeError:\n note = \"No result for LGA\"\n # print(\"no value\")\n # for r in cursor.execute(f\"SELECT * FROM announced_pu_results WHERE polling_unit_uniqueid = {str(row[1])}\"):\n # print(f\"{r[2]} = {r[3]}\")\n\n return render_template(\"q2.html\", show=show, polls=polls, note=note)\n\n return 
render_template(\"q2.html\", show=show)\n\n\n@app.route(\"/three/\", methods=['GET', 'POST'])\ndef quest_3():\n show = False\n if request.method == \"POST\":\n show = True\n\n data = request.form\n pdp = int(data['pdp'])\n dpp = int(data['dpp'])\n acn = int(data['acn'])\n ppa = int(data['ppa'])\n cdc = int(data['cdc'])\n jp = int(data['jp'])\n anpp = int(data['anpp'])\n labo = int(data['labo'])\n cpp = int(data['cpp'])\n name = data['name']\n date = datetime.datetime.now()\n ip = socket.gethostbyname(socket.gethostname())\n # uniqueid = '29'\n # print(data)\n # print(f\"{pdp}, {dpp}, {acn}, {ppa}, {cdc}, {jp}, {anpp}, {labo}, {cpp}\")\n\n conn = sqlite3.connect(\"csc455_HW3.db\")\n cursor = conn.cursor()\n\n num = 0\n for row in cursor.execute(f\"SELECT * FROM announced_pu_results\"):\n if num < row[0]:\n num = row[0]\n # print(row[0])\n id = num\n\n unique = 0\n for row in cursor.execute(f\"SELECT * FROM announced_pu_results\"):\n if unique < int(row[1]):\n unique = int(row[1])\n # print(row[0])\n uniqueid = unique + 1\n # print(uniqueid)\n\n mySql_insert_query = f\"\"\"INSERT INTO announced_pu_results (result_id, polling_unit_uniqueid,\n party_abbreviation, party_score, entered_by_user, date_entered, user_ip_address)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n\n # cursor.execute(mySql_insert_query)\n\n records_to_insert = [((id+1), uniqueid, 'PDP', pdp, name, date, ip),\n ((id+2), uniqueid, 'DPP', dpp, name, date, ip),\n ((id+3), uniqueid, 'ACN', acn, name, date, ip),\n ((id+4), uniqueid, 'PPA', ppa, name, date, ip),\n ((id+5), uniqueid, 'CDC', cdc, name, date, ip),\n ((id+6), uniqueid, 'JP', jp, name, date, ip),\n ((id+7), uniqueid, 'ANPP', anpp, name, date, ip),\n ((id+8), uniqueid, 'LABO', labo, name, date, ip),\n ((id+9), uniqueid, 'CPP', cpp, name, date, ip)]\n cursor.executemany(mySql_insert_query, records_to_insert)\n\n polls = cursor.execute(f\"SELECT * FROM announced_pu_results WHERE polling_unit_uniqueid = '{uniqueid}'\")\n\n return render_template(\"q3.html\", show=show, polls=polls)\n\n return render_template(\"q3.html\", show=show)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n", "sub_path": "bincom_test/flask_app.py", "file_name": "flask_app.py", "file_ext": "py", "file_size_in_byte": 4413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": 
"datetime.datetime.now", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "attribute"}, {"api_name": "socket.gethostbyname", "line_number": 81, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 81, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "381711817", "text": "\"\"\"\n Simple spell-checker\n https://www.kaggle.com/c/csc-iinlp-2017-please-feax-me/\n\"\"\"\nimport codecs\nimport csv\nimport time\nfrom collections import Counter, defaultdict\nfrom pyxdameraulevenshtein import damerau_levenshtein_distance\nfrom functools import lru_cache\n\nimport nltk\nfrom nltk.corpus import stopwords\nimport numpy as np\nimport pandas as pd\nimport pymorphy2\nimport re\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport string\n\nnltk.download('stopwords')\nall_stopwords = stopwords.words('russian') + stopwords.words('english')\n\n\nclass WordDict:\n _END = '__end__'\n\n def __init__(self):\n self.root = {}\n\n def __contains__(self, item):\n node = self.root\n for letter in item:\n letter = letter.lower()\n if letter not in node.keys():\n return False\n\n node = node[letter]\n else:\n return self._END in node\n\n def __add_item(self, word):\n node = self.root\n for letter in word:\n letter = letter.lower()\n node = node.setdefault(letter, {})\n node[self._END] = self._END\n \n def collect(self, words):\n for word in words:\n self.__add_item(word)\n return self\n\n\nclass StatisticalSpeller(object):\n \"\"\"\n Поиск слов, наиболее близких по числу общих n-грамм и\n последующее ранжирование по эвристике-близости\n \"\"\"\n\n __pron_set = ['то', 'та', 'те', 'так', 'это', 'эта', 'эти',\n 'той', 'тем', 'там', 'том', 'тех',\n 'этих', 'этой', 'этом', 'согласно']\n\n @staticmethod\n def tokenize(text):\n return ['^'] + [t for t in text.split()] + ['$']\n\n def __init__(self, n_candidates_search=150):\n \"\"\"\n :param n_candidates_search: число кандидатов-строк при поиске\n \"\"\"\n self.n_candidates = n_candidates_search\n self.morph = pymorphy2.MorphAnalyzer()\n\n # векторайзеры для нграмного индекса и частотного словаря\n self.vectorizer = CountVectorizer(analyzer=\"char_wb\", ngram_range=(2, 3), binary=True)\n self.voc_vectorizer = CountVectorizer(tokenizer=self.tokenize, ngram_range=(2, 2))\n\n # нграмный индекс + частотный словарь биграм по корпусу текстов\n self.index = defaultdict(set)\n self.voc = defaultdict(int)\n self.words_list = None\n\n # регэкспы для битых предлогов\n self.on_prep = re.compile(r'\\b(н{2,}а|на{2,})\\b')\n self.year = re.compile(r'^[12]\\d{3}')\n\n def fit(self, words_list):\n \"\"\"\n Подгонка спеллера\n \"\"\"\n\n checkpoint = time.time()\n self.words_list = words_list\n\n encoded_words = self.vectorizer.fit_transform(words_list).tocoo()\n\n # строим словарь, отображающий идентификатор нграммы в множество термов\n for ind in zip(encoded_words.row, encoded_words.col):\n self.index[ind[1]].add(ind[0])\n\n print(\"Speller fitted in\", time.time() - checkpoint)\n\n return self\n\n def fit_texts(self, texts):\n checkpoint = time.time()\n words_vocab = self.voc_vectorizer.fit_transform(texts).tocoo()\n self.voc = dict(zip(\n sorted(self.voc_vectorizer.vocabulary_.values()),\n words_vocab.sum(axis=0).A1))\n\n print(\"Speller fitted for texts in\", time.time() - 
checkpoint)\n\n @lru_cache(maxsize=1000000)\n def rectify(self, word, prev_word):\n \"\"\"\n Предсказания спеллера\n \"\"\"\n\n if word == ',,':\n return ','\n if word == '..':\n return '...'\n\n # запрос, преобразованный в нграммы\n char_ngrams_list = self.vectorizer.transform([word]).tocoo().col\n\n # подбираем число кандидатов по длине запроса\n self.n_candidates = 350 if len(word) <= 4 else 250 if len(word) <= 7 else self.n_candidates\n\n # для каждого терма считаем совпадение по нграммам\n counter = Counter()\n\n for token_id in char_ngrams_list:\n for word_id in self.index[token_id]:\n counter[word_id] += 1\n\n # среди топа по совпадениям по нграммам ищем \"хорошее\" исправление\n\n # используем модифицированное расстояние Левенштейна (с перестановками)\n # а также ищем слово с минимальным количеством новых букв\n suggests = list()\n for suggest in counter.most_common(n=self.n_candidates):\n sugg = self.words_list[suggest[0]]\n dist = damerau_levenshtein_distance(sugg, word)\n context_list = self.voc_vectorizer.transform([f\"{prev_word} {sugg}\"]).tocoo().col.tolist()\n if dist <= 5:\n suggs = [(sugg, dist, 0.0)]\n if context_list:\n suggs = [(sugg, dist, self.voc.get(context, 0.0)) for context in context_list]\n\n suggests.extend(suggs)\n\n suggests = sorted(suggests, key=lambda tup: tup[1])\n\n minimal_distance = min(suggest[1] for suggest in suggests)\n candidates = sorted(\n [(suggest[0], suggest[2]) for suggest in suggests\n if suggest[1] == minimal_distance and set(suggest[0]) == set(word)],\n key=lambda tup: -tup[1])\n\n return candidates[0][0] if candidates and candidates[0][1] > 0 else suggests[0][0]\n\n # ищем тег среди разборов одного слова\n def __tag_in_parse(self, tag_name, word):\n return any(tag_name in parse.tag for parse in self.morph.parse(word))\n\n # строим эвристики для битых предлогов\n def need_fix_prep(self, word, prep):\n if prep == 'е':\n if self.__tag_in_parse('VERB', word) \\\n or word in ['только', 'более', 'менее', 'больше', 'меньше']:\n return 'не'\n else:\n return prep\n elif prep == 'аа':\n return 'а'\n elif prep == 'даа' or prep == 'дда':\n return 'да'\n elif prep == 'ии':\n return 'и'\n elif prep == 'илли' or prep == 'иили':\n return 'или'\n elif prep == 'отт':\n return 'от'\n elif prep == 'ри':\n return 'при'\n elif prep in ['ыб', 'бл']:\n return 'был'\n elif prep in ['ым', 'ыт', 'ыв']:\n return prep[::-1]\n elif prep in ['зи', 'ов', 'од', 'ан', 'оп', 'ми', 'хи', 'ен']:\n if not self.__tag_in_parse('PREP', word):\n return prep[::-1]\n else:\n return prep\n elif prep == 'аз':\n if self.__tag_in_parse('accs', word):\n return prep[::-1]\n elif 'VERB' in self.morph.parse(word)[0].tag:\n return 'раз'\n else:\n return prep\n elif prep == 'в':\n if word == 'время':\n return 'во'\n else:\n return prep\n elif prep == 'д':\n if word not in string.punctuation \\\n and word not in '.. ... 
,,'.split():\n return 'до'\n else:\n return prep\n elif prep == 'з':\n if len(word) > 1:\n if self.__tag_in_parse('gent', word):\n return 'из'\n elif self.__tag_in_parse('accs', word) \\\n or self.__tag_in_parse('ablt', word):\n return 'за'\n else:\n return prep\n else:\n return prep\n elif prep == 'н':\n if len(word) > 1:\n if self.__tag_in_parse('accs', word) \\\n or self.__tag_in_parse('loct', word) \\\n or self.__tag_in_parse('loc2', word):\n return 'на'\n elif 'VERB' in self.morph.parse(word)[0].tag:\n return 'он'\n else:\n return prep\n else:\n return prep\n elif prep == 'п':\n if self.__tag_in_parse('datv', word) \\\n or self.__tag_in_parse('loct', word) \\\n or self.__tag_in_parse('loc2', word) or word.isdigit():\n return 'по'\n else:\n return prep\n elif prep == 'т':\n if len(word) > 1:\n if self.__tag_in_parse('gent', word):\n return 'от'\n elif self.__tag_in_parse('ablt', word) or word in ['же', 'есть']:\n return 'то'\n elif self.__tag_in_parse('femn', word):\n return 'та'\n else:\n return prep\n else:\n return prep\n elif prep == 'х':\n if word not in string.punctuation and not word.isdigit():\n return 'их'\n else:\n return prep\n elif prep == 'чо':\n return 'что'\n elif prep == 'о':\n if word == 'время':\n return 'во'\n else:\n return prep\n elif prep == 'ноо':\n if not word.isalpha():\n return 'но'\n else:\n return prep\n elif prep == 'кк':\n if 'datv' in self.morph.parse(word)[0].tag:\n return 'к'\n elif word not in string.punctuation:\n return 'как'\n else:\n return prep\n elif prep == 'оо':\n if self.__tag_in_parse('loct', word):\n return 'о'\n else:\n return prep\n elif prep == 'сс':\n if self.__tag_in_parse('gent', word) \\\n or self.__tag_in_parse('ablt', word) \\\n or self.year.search(word):\n return 'с'\n else:\n return prep\n elif self.on_prep.search(prep):\n if self.__tag_in_parse('accs', word) \\\n or self.__tag_in_parse('loct', word) \\\n or self.__tag_in_parse('loc2', word) \\\n or word.isdigit():\n return 'на'\n else:\n return prep\n elif prep == 'пр':\n if self.__tag_in_parse('loct', word):\n return 'при'\n elif self.__tag_in_parse('accs', word):\n return 'про'\n else:\n return prep\n elif prep == 'эо':\n return 'это'\n elif prep == 'эт':\n if self.__tag_in_parse('femn', word):\n if self.__tag_in_parse('accs', word):\n return 'эту'\n elif self.__tag_in_parse('gent', word) \\\n or self.__tag_in_parse('datv', word):\n return 'этой'\n else:\n return 'эта'\n elif self.__tag_in_parse('masc', word) \\\n and not self.__tag_in_parse('ablt', word):\n return 'этот'\n else:\n return 'это'\n else:\n return prep\n\n def need_fix_prep_after_words(self, word, prep, next_word, ind):\n if prep == 'вв':\n if ind == 0 or (ind - 1 >= 0 and 'ivx' not in word):\n return 'в'\n elif next_word == 'время':\n return 'во'\n else:\n return prep\n elif prep == 'тс' and ind - 1 >= 0 and word.isdigit():\n return 'тыс'\n elif prep == 'мк' and word.isdigit():\n return prep[::-1]\n elif prep == 'е':\n if word in self.__pron_set:\n return 'же'\n elif self.__tag_in_parse('Name', word):\n return 'де'\n elif next_word != '' and next_word not in string.punctuation:\n return 'ее'\n else:\n return prep\n elif prep == 'ж':\n if word in self.__pron_set:\n return 'же'\n else:\n return prep\n elif prep == 'ил':\n if self.__tag_in_parse('VERB', word):\n return 'ли'\n else:\n return 'или'\n else:\n return prep\n\n\nif __name__ == \"__main__\":\n\n np.random.seed(0)\n\n # зачитываем словарь \"правильных слов\"\n words_set = set(line.strip() for line in codecs.open(\"../resources/words_dict.txt\", \"r\", 
encoding=\"utf-8\"))\n \n words_dict = WordDict()\n words_dict.collect(words_set)\n\n # создаём спеллер\n speller = StatisticalSpeller()\n speller.fit(sorted(list(words_set)))\n\n # читаем выборку из правильных текстов\n df = pd.read_csv(\"../resources/corrected_texts.csv\")\n\n speller.fit_texts(list(df[\"text\"]))\n\n # читаем выборку\n df = pd.read_csv(\"../resources/broken_texts.csv\")\n\n checkpoint1 = time.time()\n total_rectification_time = 0.0\n total_sentences_rectifications = 0.0\n\n y_submission = []\n counts = 0\n\n # исправляем, попутно собирая счётчики и засекая время\n for i in range(df.shape[0]):\n\n counts += 1\n\n if counts % 100 == 0:\n print(\"Rows processed\", counts)\n\n start = time.time()\n mispelled_text = df[\"text\"][i]\n mispelled_tokens = mispelled_text.split()\n\n was_rectified = False\n\n # для каждого слова из текста поступаем следующим образом:\n # если слово отсутствует в словаре, то подбираем ему наилучшее исправление\n # далее при наличие слева стопслова с опечаткой пытаемся его исправить с помощью простых эвристик\n for j, mispelled_token in enumerate(mispelled_tokens):\n if mispelled_token not in all_stopwords and mispelled_token not in words_dict:\n prev_token = mispelled_tokens[j - 1] if j > 0 else '^'\n rectified_token = speller.rectify(mispelled_token, prev_token)\n mispelled_tokens[j] = rectified_token\n if j - 1 >= 0:\n mispelled_tokens[j - 1] = speller.need_fix_prep(rectified_token, mispelled_tokens[j - 1])\n was_rectified = True\n elif mispelled_token in words_dict:\n mispelled_tokens[j - 1] = speller.need_fix_prep(mispelled_token, mispelled_tokens[j - 1])\n nw = mispelled_tokens[j + 1] if j + 1 < len(mispelled_tokens) else ''\n mispelled_tokens[j] = speller.need_fix_prep_after_words(mispelled_tokens[j - 1],\n mispelled_token, nw, j)\n was_rectified = True\n\n if was_rectified:\n mispelled_text = \" \".join(mispelled_tokens)\n total_rectification_time += time.time() - start\n total_sentences_rectifications += 1.0\n\n y_submission.append(mispelled_text)\n\n checkpoint2 = time.time()\n\n print(\"elapsed\", checkpoint2 - checkpoint1)\n print(\"average speller time\", total_rectification_time / float(total_sentences_rectifications))\n\n submission = pd.DataFrame({\"id\": df[\"id\"], \"text\": y_submission}, columns=[\"id\", \"text\"])\n submission.to_csv(\"baseline_submission.csv\", index=None, encoding=\"utf-8\", quotechar='\"',\n quoting=csv.QUOTE_NONNUMERIC)\n", "sub_path": "src/russian/SpellChecker.py", "file_name": "SpellChecker.py", "file_ext": "py", "file_size_in_byte": 16345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "nltk.download", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 22, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 22, "usage_type": "name"}, {"api_name": "pymorphy2.MorphAnalyzer", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 78, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 82, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 86, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 114, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 134, "usage_type": "call"}, {"api_name": "pyxdameraulevenshtein.damerau_levenshtein_distance", "line_number": 147, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 116, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 212, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 260, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 279, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 366, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 369, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 379, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 384, "usage_type": "call"}, {"api_name": "time.time", "line_number": 386, "usage_type": "call"}, {"api_name": "time.time", "line_number": 401, "usage_type": "call"}, {"api_name": "time.time", "line_number": 427, "usage_type": "call"}, {"api_name": "time.time", "line_number": 432, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 437, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONNUMERIC", "line_number": 439, "usage_type": "attribute"}]} +{"seq_id": "296392357", "text": "import codecs\nimport copy\nimport glob\nimport os\nimport pickle\nimport random\nimport shutil\nimport time\nimport sklearn\nimport spacy\nimport sys\n\nif os.name == 'nt':\n module_path = os.path.abspath(os.path.join(\"..\\\\\"))\nelse:\n module_path = os.path.abspath(os.path.join('../'))\n\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom ner import Ner\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\nimport train\nimport dataset\nfrom entity_lstm import EntityLSTM\nimport utils\nimport conll_to_brat\nimport evaluate\nimport brat_to_conll\nimport utils_nlp\nimport prepare_pretrained_model\n\nclass neuroner(Ner):\n\n prediction_count = 0\n\n def __init__(self, **kwargs):\n \"\"\"\n Set parameters for class variables\n \"\"\"\n # Set parameters\n self.parameters, self.conf_parameters = utils.load_parameters(**kwargs)\n self.loadmodel = False\n self.training = False\n\n def _create_stats_graph_folder(self, parameters):\n \"\"\"\n Initialize stats_graph_folder.\n\n Args:\n parameters (dict): {key: value}, dictionary with parameters as key along with their corresponding value\n Returns:\n stats_graph_folder (str) : path to the folder created\n Raises:\n None\n \"\"\"\n experiment_timestamp = utils.get_current_time_in_miliseconds()\n dataset_name = utils.get_basename_without_extension(parameters['dataset_text_folder'])\n model_name = '{0}_{1}'.format(dataset_name, experiment_timestamp)\n utils.create_folder_if_not_exists(parameters['output_folder'])\n\n # Folder where to save graphs\n stats_graph_folder = os.path.join(parameters['output_folder'], model_name)\n utils.create_folder_if_not_exists(stats_graph_folder)\n return stats_graph_folder, experiment_timestamp\n\n def _get_valid_dataset_filepaths(self, 
parameters, dataset_types=['train', 'valid', 'test']):\n \"\"\"\n Get paths for the datasets. Also converts the dataset from CoNLL format to BRAT (specific to implmentation) format if not already done.\n\n Args:\n parameters (dict): {key: value}, dictionary with parameters as key along with their corresponding value\n dataset_types (list): types of datasets given eg. train dataset, test dataset etc.\n Returns:\n dataset_filepaths (dict) : {key: value}, dictionary with key as type (train, valid, test) of dataset and value as file paths with corresponding data in CoNLL format\n dataset_brat_folders (dict) : {key: value}, dictionary with key as type (train, valid, test) of dataset and value as folders with corresponding data in BRAT format\n Raises:\n None\n \"\"\"\n dataset_filepaths = {}\n dataset_brat_folders = {}\n\n for dataset_type in dataset_types:\n dataset_filepaths[dataset_type] = os.path.join(parameters['dataset_text_folder'],\n '{0}.txt'.format(dataset_type))\n if not os.path.isfile(dataset_filepaths[dataset_type]):\n dataset_filepaths[dataset_type] = os.path.join(parameters['dataset_text_folder'],\n self.filename)\n dataset_brat_folders[dataset_type] = os.path.join(parameters['dataset_text_folder'],\n dataset_type)\n dataset_compatible_with_brat_filepath = os.path.join(parameters['dataset_text_folder'],\n '{0}_compatible_with_brat.txt'.format(dataset_type))\n\n # Conll file exists\n if os.path.isfile(dataset_filepaths[dataset_type]) \\\n and os.path.getsize(dataset_filepaths[dataset_type]) > 0:\n # Brat text files exist\n if os.path.exists(dataset_brat_folders[dataset_type]) and \\\n len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:\n\n # Check compatibility between conll and brat files\n brat_to_conll.check_brat_annotation_and_text_compatibility(dataset_brat_folders[dataset_type])\n if os.path.exists(dataset_compatible_with_brat_filepath):\n dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath\n conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepaths[dataset_type],\n dataset_brat_folders[dataset_type])\n\n # Brat text files do not exist\n else:\n # Populate brat text and annotation files based on conll file\n conll_to_brat.conll_to_brat(dataset_filepaths[dataset_type],\n dataset_compatible_with_brat_filepath, dataset_brat_folders[dataset_type],\n dataset_brat_folders[dataset_type])\n dataset_filepaths[dataset_type] = dataset_compatible_with_brat_filepath\n\n # Conll file does not exist\n else:\n # Brat text files exist\n if os.path.exists(dataset_brat_folders[dataset_type]) \\\n and len(glob.glob(os.path.join(dataset_brat_folders[dataset_type], '*.txt'))) > 0:\n dataset_filepath_for_tokenizer = os.path.join(parameters['dataset_text_folder'],\n '{0}_{1}.txt'.format(dataset_type, parameters['tokenizer']))\n if os.path.exists(dataset_filepath_for_tokenizer):\n conll_to_brat.check_compatibility_between_conll_and_brat_text(dataset_filepath_for_tokenizer,\n dataset_brat_folders[dataset_type])\n else:\n # Populate conll file based on brat files\n brat_to_conll.brat_to_conll(dataset_brat_folders[dataset_type],\n dataset_filepath_for_tokenizer, parameters['tokenizer'], parameters['spacylanguage'])\n dataset_filepaths[dataset_type] = dataset_filepath_for_tokenizer\n\n # Brat text files do not exist\n else:\n del dataset_filepaths[dataset_type]\n del dataset_brat_folders[dataset_type]\n continue\n\n if parameters['tagging_format'] == 'bioes':\n # Generate conll file with BIOES format\n bioes_filepath = 
os.path.join(parameters['dataset_text_folder'],\n '{0}_bioes.txt'.format(utils.get_basename_without_extension(dataset_filepaths[dataset_type])))\n utils_nlp.convert_conll_from_bio_to_bioes(dataset_filepaths[dataset_type],\n bioes_filepath)\n dataset_filepaths[dataset_type] = bioes_filepath\n\n return dataset_filepaths, dataset_brat_folders\n\n def _check_param_compatibility(self, parameters, dataset_filepaths):\n \"\"\"\n Check whether parameters are compatible.\n\n Args:\n parameters (dict): {key: value}, dictionary with parameters as key along with their corresponding value\n dataset_filepaths (type): description.\n Returns:\n None\n Raises:\n None\n \"\"\"\n utils.check_param_compatibility(parameters, dataset_filepaths)\n\n def predict_text(self, text):\n \"\"\"\n Makes predictions on a given input text.\n Args:\n text (str): string of text on which to make prediction\n Returns:\n predictions: [tuple,...], i.e. list of tuples.\n Each tuple is (start index, span, mention text, mention type)\n Where:\n - start index: int, the index of the first character of the mention span. None if not applicable.\n - span: int, the length of the mention. None if not applicable.\n - mention text: str, the actual text that was identified as a named entity. Required.\n - mention type: str, the entity/mention type. None if not applicable.\n NOTE: len(predictions) should equal len(data) AND the ordering should not change [important for\n evalutation. See note in evaluate() about parallel arrays.]\n Raises:\n None\n \"\"\"\n # IMPLEMENT PREDICTION.\n self.prediction_count += 1\n\n if self.prediction_count == 1:\n self.parameters['dataset_text_folder'] = os.path.join('.', 'data', 'temp')\n self.stats_graph_folder, _ = self._create_stats_graph_folder(self.parameters)\n\n # Update the deploy folder, file, and modeldata\n dataset_type = 'deploy'\n\n # Delete all deployment data\n for filepath in glob.glob(os.path.join(self.parameters['dataset_text_folder'],\n '{0}*'.format(dataset_type))):\n if os.path.isdir(filepath):\n shutil.rmtree(filepath)\n else:\n os.remove(filepath)\n\n # Create brat folder and file\n dataset_brat_deploy_folder = os.path.join(self.parameters['dataset_text_folder'],\n dataset_type)\n utils.create_folder_if_not_exists(dataset_brat_deploy_folder)\n dataset_brat_deploy_filepath = os.path.join(dataset_brat_deploy_folder,\n 'temp_{0}.txt'.format(str(self.prediction_count).zfill(5)))\n # self._get_dataset_brat_deploy_filepath(dataset_brat_deploy_folder)\n with codecs.open(dataset_brat_deploy_filepath, 'w', 'UTF-8') as f:\n f.write(text)\n\n # Update deploy filepaths\n dataset_filepaths, dataset_brat_folders = self._get_valid_dataset_filepaths(self.parameters,\n dataset_types=[dataset_type])\n self.dataset_filepaths.update(dataset_filepaths)\n self.dataset_brat_folders.update(dataset_brat_folders)\n\n # Update the dataset for the new deploy set\n self.modeldata.update_dataset(self.dataset_filepaths, [dataset_type])\n\n # Predict labels and output brat\n output_filepaths = {}\n prediction_output = train.prediction_step(self.sess, self.modeldata,\n dataset_type, self.model, self.transition_params_trained,\n self.stats_graph_folder, self.prediction_count, self.parameters,\n self.dataset_filepaths)\n\n _, _, output_filepaths[dataset_type] = prediction_output\n conll_to_brat.output_brat(output_filepaths, self.dataset_brat_folders,\n self.stats_graph_folder, overwrite=True)\n\n # Print and output result\n text_filepath = os.path.join(self.stats_graph_folder, 'brat', 'deploy',\n 
os.path.basename(dataset_brat_deploy_filepath))\n annotation_filepath = os.path.join(self.stats_graph_folder, 'brat',\n 'deploy', '{0}.ann'.format(\n utils.get_basename_without_extension(dataset_brat_deploy_filepath)))\n text2, entities = brat_to_conll.get_entities_from_brat(text_filepath,\n annotation_filepath, verbose=True)\n\n if self.parameters['tokenizer'] == 'spacy':\n spacy_nlp = spacy.load(self.parameters['spacylanguage'])\n\n tokens = spacy_nlp(text)\n predictions = []\n pred_tuple = prediction_output[0]\n\n for i, token in enumerate(tokens):\n pred = (token.idx, len(token), token, self.modeldata.index_to_label[pred_tuple[i]])\n predictions.append(pred)\n\n assert (text == text2)\n return predictions\n\n def predict_dataset(self, data, dataset_type='test'):\n \"\"\"\n Makes predictions on a given dataset and returns the predictions in the specified format\n Args:\n dataset: data in arbitrary format as required for testing\n Returns:\n predictions: [tuple,...], i.e. list of tuples.\n Each tuple is (start index, span, mention text, mention type)\n Where:\n - start index: int, the index of the first character of the mention span. None if not applicable.\n - span: int, the length of the mention. None if not applicable.\n - mention text: str, the actual text that was identified as a named entity. Required.\n - mention type: str, the entity/mention type. None if not applicable.\n NOTE: len(predictions) should equal len(data) AND the ordering should not change [important for\n evalutation. See note in evaluate() about parallel arrays.]\n Raises:\n None\n \"\"\"\n # IMPLEMENT PREDICTION.\n\n # Load dataset only when directly loading the model\n\n if self.training:\n self.parameters['use_pretrained_model'] = True\n self.parameters['pretrained_model_folder'] = os.path.join('.', 'output', os.path.basename(self.stats_graph_folder), 'output')\n tf.reset_default_graph()\n\n if self.loadmodel:\n self.dataset_filepaths, self.dataset_brat_folders = self._get_valid_dataset_filepaths(self.parameters)\n self.modeldata = dataset.Dataset(verbose=self.parameters['verbose'], debug=self.parameters['debug'])\n self.token_to_vector = self.modeldata.load_dataset(data, self.dataset_filepaths, self.parameters)\n self.loadmodel = False\n\n # Launch session. 
Automatically choose a device\n # if the specified one doesn't exist\n if self.parameters['use_pretrained_model']:\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=self.parameters['number_of_cpu_threads'],\n inter_op_parallelism_threads=self.parameters['number_of_cpu_threads'],\n device_count={'CPU': 1, 'GPU': self.parameters['number_of_gpus']},\n allow_soft_placement=True,\n log_device_placement=False)\n\n self.sess = tf.Session(config=session_conf)\n with self.sess.as_default():\n\n # Initialize or load pretrained model\n self.model = EntityLSTM(self.modeldata, self.parameters)\n self.sess.run(tf.global_variables_initializer())\n\n self.transition_params_trained = self.model.restore_from_pretrained_model(self.parameters,\n self.modeldata, self.sess,\n token_to_vector=self.token_to_vector)\n\n parameters = self.parameters\n dataset_filepaths = self.dataset_filepaths\n sess = self.sess\n model = self.model\n transition_params_trained = self.transition_params_trained\n stats_graph_folder, experiment_timestamp = self._create_stats_graph_folder(self.parameters)\n\n all_predictions = []\n true_labels = []\n tokens = []\n span = []\n\n output_filepath = os.path.join(stats_graph_folder, '{1:03d}_{0}.txt'.format(dataset_type,\n 0))\n output_file = codecs.open(output_filepath, 'w', 'UTF-8')\n original_conll_file = codecs.open(dataset_filepaths[dataset_type], 'r', 'UTF-8')\n\n modeldata = self.modeldata\n\n for i in range(len(modeldata.token_indices[dataset_type])):\n feed_dict = {\n model.input_token_indices: modeldata.token_indices[dataset_type][i],\n model.input_token_character_indices: modeldata.character_indices_padded[dataset_type][i],\n model.input_token_lengths: modeldata.token_lengths[dataset_type][i],\n model.input_label_indices_vector: modeldata.label_vector_indices[dataset_type][i],\n model.dropout_keep_prob: 1.\n }\n\n unary_scores, predictions = sess.run([model.unary_scores,\n model.predictions], feed_dict)\n\n if parameters['use_crf']:\n predictions, _ = tf.contrib.crf.viterbi_decode(unary_scores,\n transition_params_trained)\n predictions = predictions[1:-1]\n else:\n predictions = predictions.tolist()\n\n assert (len(predictions) == len(modeldata.tokens[dataset_type][i]))\n\n output_string = ''\n prediction_labels = [modeldata.index_to_label[prediction] for prediction in predictions]\n unary_score_list = unary_scores.tolist()[1:-1]\n\n gold_labels = modeldata.labels[dataset_type][i]\n\n if parameters['tagging_format'] == 'bioes':\n prediction_labels = utils_nlp.bioes_to_bio(prediction_labels)\n gold_labels = utils_nlp.bioes_to_bio(gold_labels)\n\n for prediction, token, gold_label, scores in zip(prediction_labels,\n modeldata.tokens[dataset_type][i], gold_labels,\n unary_score_list):\n\n while True:\n line = original_conll_file.readline()\n split_line = line.strip().split(' ')\n\n if '-DOCSTART-' in split_line[0] or len(split_line) == 0 \\\n or len(split_line[0]) == 0:\n continue\n else:\n token_original = split_line[0]\n\n if parameters['tagging_format'] == 'bioes':\n split_line.pop()\n\n gold_label_original = split_line[6]\n\n assert (token == token_original and gold_label == gold_label_original)\n break\n\n split_line.append(prediction)\n if parameters['output_scores']:\n # space separated scores\n scores = ' '.join([str(i) for i in scores])\n split_line.append('{}'.format(scores))\n output_string += ' '.join(split_line) + '\\n'\n\n output_file.write(output_string + '\\n')\n\n predicted_labels = [modeldata.index_to_label[preds] for preds in predictions]\n\n 
all_predictions.extend(prediction_labels)\n all_predictions.append('')\n true_labels.extend(modeldata.labels[dataset_type][i])\n true_labels.append('')\n tokens.extend(modeldata.tokens[dataset_type][i])\n tokens.append('')\n span.extend(modeldata.token_lengths[dataset_type][i])\n span.append('')\n\n start_index = [None] * len(true_labels)\n\n all_predictions = list(map(list, zip(start_index, span, tokens, all_predictions)))\n all_predictions = [tuple(pred) for pred in all_predictions]\n\n return all_predictions\n\n #@overrides(DITKModel_NER)\n def convert_ground_truth(self, data, *args, **kwargs): # <--- implemented PER class\n \"\"\"\n Converts test data into common format for evaluation [i.e. same format as predict()]\n This added step/layer of abstraction is required due to the refactoring of read_dataset_traint()\n and read_dataset_test() back to the single method of read_dataset() along with the requirement on\n the format of the output of predict() and therefore the input format requirement of evaluate(). Since\n individuals will implement their own format of data from read_dataset(), this is the layer that\n will convert to proper format for evaluate().\n Args:\n data: data in proper [arbitrary] format for train or test. [i.e. format of output from read_dataset]\n args: type of dataset for which to extract ground truth (values: train, dev, test)\n Returns:\n ground_truth: [tuple,...], i.e. list of tuples. [SAME format as output of predict()]\n Each tuple is (start index, span, mention text, mention type)\n Where:\n - start index: int, the index of the first character of the mention span. None if not applicable.\n - span: int, the length of the mention. None if not applicable.\n - mention text: str, the actual text that was identified as a named entity. Required.\n - mention type: str, the entity/mention type. None if not applicable.\n Raises:\n None\n \"\"\"\n # IMPLEMENT CONVERSION. 
STRICT OUTPUT FORMAT REQUIRED.\n\n\n # Load dataset\n if self.loadmodel:\n self.dataset_filepaths, self.dataset_brat_folders = self._get_valid_dataset_filepaths(self.parameters)\n self.modeldata = dataset.Dataset(verbose=self.parameters['verbose'], debug=self.parameters['debug'])\n self.token_to_vector = self.modeldata.load_dataset(data, self.dataset_filepaths, self.parameters)\n self.loadmodel = False\n\n modeldata = self.modeldata\n\n # return ground_truth\n true_labels = []\n tokens = []\n span = []\n\n dataset_type = 'test'\n\n if len(args)==1:\n dataset_type = 'valid' if args[0] == 'dev' else args[0]\n\n for i in range(len(modeldata.token_indices[dataset_type])):\n true_labels.extend(modeldata.labels[dataset_type][i])\n true_labels.append('')\n tokens.extend(modeldata.tokens[dataset_type][i])\n tokens.append('')\n span.extend(modeldata.token_lengths[dataset_type][i])\n span.append('')\n\n start_index = [None] * len(true_labels)\n\n all_y_true = list(map(list, zip(start_index, span, tokens, true_labels)))\n all_y_true = [tuple(pred) for pred in all_y_true]\n\n return all_y_true\n\n def save_model(self, file=None):\n \"\"\"\n :param file: Where to save the model - Optional function\n :return:\n \"\"\"\n utils.create_folder_if_not_exists(self.modelFolder)\n self.model.saver.save(self.sess, os.path.join(self.modelFolder, 'model_{0:05d}.ckpt'.format(0)))\n\n def load_model(self, file=None):\n \"\"\"\n :param file: From where to load the model - Optional function\n :return:\n \"\"\"\n\n self.parameters['use_pretrained_model'] = True\n self.parameters['pretrained_model_folder'] = file if file!=None else self.parameters['pretrained_model_folder']\n self.loadmodel = True\n\n\n #@overrides(DITKModel_NER)\n def read_dataset(self, file_dict, dataset_name=None, *args, **kwargs): # <--- implemented PER class\n \"\"\"\n Reads a dataset in preparation for train or test. Returns data in proper format for train or test.\n Args:\n fileNames: list-like. List of files representing the dataset to read. Each element is str, representing\n filename [possibly with filepath]\n Returns:\n data: data in arbitrary format for train or test.\n Raises:\n None\n \"\"\"\n # IMPLEMENT READING\n # pass\n standard_split = [\"train\", \"test\", \"dev\"]\n dataset_root = os.path.dirname(file_dict['train'])\n self.parameters['dataset_text_folder'] = dataset_root\n self.filename = os.path.basename(file_dict['train'])\n data = {}\n\n try:\n for split in standard_split:\n file = file_dict[split]\n with open(file, mode='r', encoding='utf-8') as f:\n raw_data = f.read().splitlines()\n for i, line in enumerate(raw_data):\n if len(line.strip()) > 0:\n raw_data[i] = line.strip().split()\n else:\n raw_data[i] = list(line)\n data[split] = raw_data\n except KeyError:\n raise ValueError(\"Invalid file_dict. Standard keys (train, test, dev)\")\n except Exception as e:\n print('Something went wrong.', e)\n return data\n\n #@overrides(DITKModel_NER)\n def train(self, data, *args, **kwargs):\n \"\"\"\n Trains a model on the given input data\n Args:\n data: iterable of arbitrary format. represents the data instances and features and labels you use to train your model.\n Returns:\n ret: None. 
Trained model stored internally to class instance state.\n Raises:\n None\n \"\"\"\n # IMPLEMENT TRAINING.\n # pass\n\n self.dataset_filepaths, self.dataset_brat_folders = self._get_valid_dataset_filepaths(self.parameters)\n self._check_param_compatibility(self.parameters, self.dataset_filepaths)\n\n # Load dataset\n self.modeldata = dataset.Dataset(verbose=self.parameters['verbose'], debug=self.parameters['debug'])\n self.token_to_vector = self.modeldata.load_dataset(data, self.dataset_filepaths, self.parameters)\n\n # Launch session. Automatically choose a device\n # if the specified one doesn't exist\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=self.parameters['number_of_cpu_threads'],\n inter_op_parallelism_threads=self.parameters['number_of_cpu_threads'],\n device_count={'CPU': 1, 'GPU': self.parameters['number_of_gpus']},\n allow_soft_placement=True,\n log_device_placement=False)\n\n self.sess = tf.Session(config=session_conf)\n with self.sess.as_default():\n\n # Initialize or load pretrained model\n self.model = EntityLSTM(self.modeldata, self.parameters)\n self.sess.run(tf.global_variables_initializer())\n\n if self.parameters['use_pretrained_model']:\n self.transition_params_trained = self.model.restore_from_pretrained_model(self.parameters,\n self.modeldata, self.sess,\n token_to_vector=self.token_to_vector)\n else:\n self.model.load_pretrained_token_embeddings(self.sess, self.modeldata,\n self.parameters, self.token_to_vector)\n self.transition_params_trained = np.random.rand(len(self.modeldata.unique_labels) + 2,\n len(self.modeldata.unique_labels) + 2)\n\n parameters = self.parameters\n conf_parameters = self.conf_parameters\n dataset_filepaths = self.dataset_filepaths\n modeldata = self.modeldata\n dataset_brat_folders = self.dataset_brat_folders\n sess = self.sess\n model = self.model\n transition_params_trained = self.transition_params_trained\n stats_graph_folder, experiment_timestamp = self._create_stats_graph_folder(parameters)\n\n self.stats_graph_folder = stats_graph_folder\n\n # Initialize and save execution details\n start_time = time.time()\n results = {}\n results['epoch'] = {}\n results['execution_details'] = {}\n results['execution_details']['train_start'] = start_time\n results['execution_details']['time_stamp'] = experiment_timestamp\n results['execution_details']['early_stop'] = False\n results['execution_details']['keyboard_interrupt'] = False\n results['execution_details']['num_epochs'] = 0\n results['model_options'] = copy.copy(parameters)\n\n model_folder = os.path.join(stats_graph_folder, 'model')\n self.modelFolder = model_folder\n utils.create_folder_if_not_exists(model_folder)\n with open(os.path.join(model_folder, 'parameters.ini'), 'w') as parameters_file:\n conf_parameters.write(parameters_file)\n pickle.dump(modeldata, open(os.path.join(model_folder, 'dataset.pickle'), 'wb'))\n\n tensorboard_log_folder = os.path.join(stats_graph_folder, 'tensorboard_logs')\n utils.create_folder_if_not_exists(tensorboard_log_folder)\n tensorboard_log_folders = {}\n for dataset_type in dataset_filepaths.keys():\n tensorboard_log_folders[dataset_type] = os.path.join(stats_graph_folder,\n 'tensorboard_logs', dataset_type)\n utils.create_folder_if_not_exists(tensorboard_log_folders[dataset_type])\n\n # Instantiate the writers for TensorBoard\n writers = {}\n for dataset_type in dataset_filepaths.keys():\n writers[dataset_type] = tf.summary.FileWriter(tensorboard_log_folders[dataset_type],\n graph=sess.graph)\n\n # embedding_writer has to write in 
model_folder, otherwise TensorBoard won't be able to view embeddings\n embedding_writer = tf.summary.FileWriter(model_folder)\n\n embeddings_projector_config = projector.ProjectorConfig()\n tensorboard_token_embeddings = embeddings_projector_config.embeddings.add()\n tensorboard_token_embeddings.tensor_name = model.token_embedding_weights.name\n token_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_tokens.tsv')\n tensorboard_token_embeddings.metadata_path = os.path.relpath(token_list_file_path, '.')\n\n tensorboard_character_embeddings = embeddings_projector_config.embeddings.add()\n tensorboard_character_embeddings.tensor_name = model.character_embedding_weights.name\n character_list_file_path = os.path.join(model_folder, 'tensorboard_metadata_characters.tsv')\n tensorboard_character_embeddings.metadata_path = os.path.relpath(character_list_file_path, '.')\n\n projector.visualize_embeddings(embedding_writer, embeddings_projector_config)\n\n # Write metadata for TensorBoard embeddings\n token_list_file = codecs.open(token_list_file_path, 'w', 'UTF-8')\n for token_index in range(modeldata.vocabulary_size):\n token_list_file.write('{0}\\n'.format(modeldata.index_to_token[token_index]))\n token_list_file.close()\n\n character_list_file = codecs.open(character_list_file_path, 'w', 'UTF-8')\n for character_index in range(modeldata.alphabet_size):\n if character_index == modeldata.PADDING_CHARACTER_INDEX:\n character_list_file.write('PADDING\\n')\n else:\n character_list_file.write('{0}\\n'.format(modeldata.index_to_character[character_index]))\n character_list_file.close()\n\n # Start training + evaluation loop. Each iteration corresponds to 1 epoch.\n # number of epochs with no improvement on the validation test in terms of F1-score\n bad_counter = 0\n previous_best_valid_f1_score = 0\n epoch_number = -1\n try:\n while True:\n step = 0\n epoch_number += 1\n print('\\nStarting epoch {0}'.format(epoch_number))\n\n epoch_start_time = time.time()\n\n if epoch_number != 0:\n # Train model: loop over all sequences of training set with shuffling\n sequence_numbers = list(range(len(modeldata.token_indices['train'])))\n random.shuffle(sequence_numbers)\n for sequence_number in sequence_numbers:\n transition_params_trained = train.train_step(sess, modeldata,\n sequence_number, model, parameters)\n step += 1\n if step % 10 == 0:\n print('Training {0:.2f}% done'.format(step / len(sequence_numbers) * 100),\n end='\\r', flush=True)\n\n epoch_elapsed_training_time = time.time() - epoch_start_time\n print('Training completed in {0:.2f} seconds'.format(epoch_elapsed_training_time),\n flush=True)\n\n y_pred, y_true, output_filepaths = train.predict_labels(sess, model,\n transition_params_trained, parameters,\n modeldata, epoch_number,\n stats_graph_folder, dataset_filepaths)\n\n # Evaluate model: save and plot results\n #evaluate.evaluate_model(results, modeldata, y_pred, y_true, stats_graph_folder,\n # epoch_number, epoch_start_time, output_filepaths, parameters)\n\n # Save model\n model.saver.save(sess, os.path.join(model_folder, 'model_{0:05d}.ckpt'.format(epoch_number)))\n\n # Save TensorBoard logs\n summary = sess.run(model.summary_op, feed_dict=None)\n writers['train'].add_summary(summary, epoch_number)\n writers['train'].flush()\n utils.copytree(writers['train'].get_logdir(), model_folder)\n\n # Early stop\n '''\n valid_f1_score = results['epoch'][epoch_number][0]['valid']['f1_score']['micro']\n if valid_f1_score > previous_best_valid_f1_score:\n bad_counter = 0\n 
previous_best_valid_f1_score = valid_f1_score\n conll_to_brat.output_brat(output_filepaths, dataset_brat_folders,\n stats_graph_folder, overwrite=True)\n self.transition_params_trained = transition_params_trained\n else:\n bad_counter += 1\n print(\"The last {0} epochs have not shown improvements on the validation set.\".format(bad_counter))\n '''\n\n if bad_counter >= parameters['patience']:\n print('Early Stop!')\n results['execution_details']['early_stop'] = True\n break\n\n if epoch_number >= parameters['maximum_number_of_epochs']:\n break\n\n except KeyboardInterrupt:\n results['execution_details']['keyboard_interrupt'] = True\n print('Training interrupted')\n\n print('Finishing the experiment')\n end_time = time.time()\n results['execution_details']['train_duration'] = end_time - start_time\n results['execution_details']['train_end'] = end_time\n evaluate.save_results(results, stats_graph_folder)\n self.training = True\n main_folder = os.path.basename(stats_graph_folder)\n prepare_pretrained_model.prepare_pretrained_model_for_restoring(main_folder, epoch_number, 'output', False)\n for dataset_type in dataset_filepaths.keys():\n writers[dataset_type].close()\n\n #@overrides(DITKModel_NER)\n def predict(self, data, *args, **kwargs):\n \"\"\"\n Predicts on the given input data. Assumes model has been trained with train()\n Args:\n data: iterable of arbitrary format. represents the data instances and features you use to make predictions\n Note that prediction requires trained model. Precondition that class instance already stores trained model\n information.\n\n Returns:\n predictions: [tuple,...], i.e. list of tuples.\n Each tuple is (start index, span, mention text, mention type)\n Where:\n - start index: int, the index of the first character of the mention span. None if not applicable.\n - span: int, the length of the mention. None if not applicable.\n - mention text: str, the actual text that was identified as a named entity. Required.\n - mention type: str, the entity/mention type. None if not applicable.\n NOTE: len(predictions) should equal len(data) AND the ordering should not change [important for\n evalutation. See note in evaluate() about parallel arrays.]\n Raises:\n None\n \"\"\"\n # IMPLEMENT PREDICTION. STRICT OUTPUT FORMAT REQUIRED.\n\n # return predictions\n\n if isinstance(data, str):\n return self.predict_text(data)\n else:\n if len(args)==0:\n return self.predict_dataset(data)\n else:\n dataset_type = 'valid' if args[0] == 'dev' else args[0]\n return self.predict_dataset(data, dataset_type)\n\n #@overrides(DITKModel_NER)\n def evaluate(self, predictions, groundTruths, *args, **kwargs):\n \"\"\"\n Calculates evaluation metrics on chosen benchmark dataset [Precision,Recall,F1, or others...]\n Args:\n predictions: [tuple,...], list of tuples [same format as output from predict]\n groundTruths: [tuple,...], list of tuples representing ground truth.\n Returns:\n metrics: tuple with (p,r,f1). 
Each element is float.\n Raises:\n None\n \"\"\"\n # pseudo-implementation\n # we have a set of predictions and a set of ground truth data.\n # calculate true positive, false positive, and false negative\n # calculate Precision = tp/(tp+fp)\n # calculate Recall = tp/(tp+fn)\n # calculate F1 using precision and recall\n\n # return (precision, recall, f1)\n predicted_labels = [predicted[3] for predicted in predictions if predicted[3]!='']\n ground_labels = [true_labels[3] for true_labels in groundTruths if true_labels[3]!='']\n\n label_encoder = sklearn.preprocessing.LabelEncoder()\n label_set = list(self.modeldata.label_to_index.keys())\n label_encoder.fit(label_set)\n\n ground_labels = label_encoder.transform(ground_labels)\n predicted_labels = label_encoder.transform(predicted_labels)\n\n new_y_pred, new_y_true, new_label_indices, new_label_names, _, _ = evaluate.remap_labels(predicted_labels,\n ground_labels,\n self.modeldata,\n self.parameters[\n 'main_evaluation_mode'])\n\n print(sklearn.metrics.classification_report(new_y_true, new_y_pred,\n digits=4, labels=new_label_indices, target_names=new_label_names))\n precision = sklearn.metrics.precision_score(new_y_true, new_y_pred, average='micro', labels=new_label_indices)\n recall = sklearn.metrics.recall_score(new_y_true, new_y_pred, average='micro', labels=new_label_indices)\n f1 = sklearn.metrics.f1_score(new_y_true, new_y_pred, average='micro', labels=new_label_indices)\n return precision, recall, f1\n\n def get_params(self):\n \"\"\"\n Returns set parameters.\n\n Args:\n None.\n Returns:\n parameters (dict) : {key: value}, dictionary with parameters as key along with their corresponding value\n Raises:\n None\n \"\"\"\n return self.parameters\n\n def close(self):\n \"\"\"\n Clean up helper function\n\n Args:\n None.\n Returns:\n None.\n Raises:\n None\n \"\"\"\n self.__del__()\n\n def __del__(self):\n \"\"\"\n Deletes tensorflow session.\n\n Args:\n None.\n Returns:\n None.\n Raises:\n None\n \"\"\"\n self.sess.close()\n\n", "sub_path": "extraction/named_entity/neuroner/neuroner.py", "file_name": "neuroner.py", "file_ext": "py", "file_size_in_byte": 40120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 24, "usage_type": "call"}, {"api_name": "ner.Ner", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.load_parameters", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.get_current_time_in_miliseconds", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.get_basename_without_extension", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "brat_to_conll.check_brat_annotation_and_text_compatibility", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "conll_to_brat.check_compatibility_between_conll_and_brat_text", "line_number": 110, "usage_type": "call"}, {"api_name": "conll_to_brat.conll_to_brat", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "conll_to_brat.check_compatibility_between_conll_and_brat_text", "line_number": 129, "usage_type": "call"}, {"api_name": "brat_to_conll.brat_to_conll", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "utils.get_basename_without_extension", "line_number": 146, "usage_type": "call"}, {"api_name": "utils_nlp.convert_conll_from_bio_to_bioes", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.check_param_compatibility", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 196, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 199, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 210, "usage_type": "call"}, {"api_name": "train.prediction_step", "line_number": 224, "usage_type": "call"}, {"api_name": "conll_to_brat.output_brat", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "utils.get_basename_without_extension", "line_number": 238, "usage_type": "call"}, {"api_name": "brat_to_conll.get_entities_from_brat", "line_number": 239, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 280, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 281, "usage_type": "call"}, {"api_name": "dataset.Dataset", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 292, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 299, "usage_type": "call"}, {"api_name": "entity_lstm.EntityLSTM", "line_number": 303, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 324, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 325, "usage_type": "call"}, {"api_name": "tensorflow.contrib.crf.viterbi_decode", "line_number": 342, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 342, "usage_type": "attribute"}, {"api_name": "utils_nlp.bioes_to_bio", "line_number": 357, "usage_type": "call"}, {"api_name": "utils_nlp.bioes_to_bio", "line_number": 358, "usage_type": "call"}, {"api_name": "dataset.Dataset", "line_number": 438, "usage_type": "call"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 474, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 475, "usage_type": "call"}, {"api_name": "os.path", "line_number": 475, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 503, "usage_type": "call"}, {"api_name": "os.path", "line_number": 503, "usage_type": "attribute"}, {"api_name": "os.path.basename", 
"line_number": 505, "usage_type": "call"}, {"api_name": "os.path", "line_number": 505, "usage_type": "attribute"}, {"api_name": "dataset.Dataset", "line_number": 543, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 548, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 555, "usage_type": "call"}, {"api_name": "entity_lstm.EntityLSTM", "line_number": 559, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 560, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 569, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 585, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 594, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 596, "usage_type": "call"}, {"api_name": "os.path", "line_number": 596, "usage_type": "attribute"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 598, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 599, "usage_type": "call"}, {"api_name": "os.path", "line_number": 599, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 601, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 601, "usage_type": "call"}, {"api_name": "os.path", "line_number": 601, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 603, "usage_type": "call"}, {"api_name": "os.path", "line_number": 603, "usage_type": "attribute"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 604, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 607, "usage_type": "call"}, {"api_name": "os.path", "line_number": 607, "usage_type": "attribute"}, {"api_name": "utils.create_folder_if_not_exists", "line_number": 609, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 614, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 614, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 618, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 618, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig", "line_number": 620, "usage_type": "call"}, {"api_name": "tensorflow.contrib.tensorboard.plugins.projector", "line_number": 620, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 623, "usage_type": "call"}, {"api_name": "os.path", "line_number": 623, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 624, "usage_type": "call"}, {"api_name": "os.path", "line_number": 624, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 628, "usage_type": "call"}, {"api_name": "os.path", "line_number": 628, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 629, "usage_type": "call"}, {"api_name": "os.path", "line_number": 629, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "line_number": 631, "usage_type": "call"}, {"api_name": "tensorflow.contrib.tensorboard.plugins.projector", "line_number": 631, "usage_type": "name"}, {"api_name": "codecs.open", "line_number": 634, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 639, "usage_type": "call"}, {"api_name": "time.time", "line_number": 658, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 663, 
"usage_type": "call"}, {"api_name": "train.train_step", "line_number": 665, "usage_type": "call"}, {"api_name": "time.time", "line_number": 672, "usage_type": "call"}, {"api_name": "train.predict_labels", "line_number": 676, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 686, "usage_type": "call"}, {"api_name": "os.path", "line_number": 686, "usage_type": "attribute"}, {"api_name": "utils.copytree", "line_number": 692, "usage_type": "call"}, {"api_name": "time.time", "line_number": 721, "usage_type": "call"}, {"api_name": "evaluate.save_results", "line_number": 724, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 726, "usage_type": "call"}, {"api_name": "os.path", "line_number": 726, "usage_type": "attribute"}, {"api_name": "prepare_pretrained_model.prepare_pretrained_model_for_restoring", "line_number": 727, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 789, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 789, "usage_type": "attribute"}, {"api_name": "evaluate.remap_labels", "line_number": 796, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 802, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 802, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 804, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 804, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 805, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 805, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 806, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 806, "usage_type": "attribute"}]} +{"seq_id": "633802513", "text": "\"\"\" Turning a webpage into data using BeautifulSoup: getting the hyperlinks\nIn this exercise, you'll figure out how to extract the URLs of the hyperlinks from the BDFL's webpage. In the process, you'll become close friends with the soup method find_all(). \"\"\"\n\n\n# Import packages\nimport requests\nfrom bs4 import BeautifulSoup\n\n# Specify url\nurl = 'https://www.python.org/~guido/'\n\n# Package the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Extracts the response as html: html_doc\nhtml_doc = r.text\n\n# create a BeautifulSoup object from the HTML: soup\nsoup = BeautifulSoup(html_doc)\n\n# Print the title of Guido's webpage\nprint(soup.title)\n\n# Find all 'a' tags (which define hyperlinks): a_tags\na_tags = soup.find_all('a')\n\n# Print the URLs to the shell\nfor link in a_tags:\n print(link.get('href'))\n", "sub_path": "1-importing-data-from-the-internet/9_turn_webpage_into_data_using_beautifulsoup_gettinhyperlinks.py", "file_name": "9_turn_webpage_into_data_using_beautifulsoup_gettinhyperlinks.py", "file_ext": "py", "file_size_in_byte": 832, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "185480799", "text": "import base64\nimport xml.etree.ElementTree as ET\n\nimport requests\n\nok_response_codes = requests.codes.ok\n\n\ndef get_xml_content(response, tag=\"result\"):\n \"\"\"If the response is successful it returns the text held within\n the 'tag' element. 
If not the response status is returned\"\"\"\n\n tree = ET.fromstring(response)\n # tag = \"*\" + tag\n\n if tree.attrib[\"responseStatus\"] == \"success\":\n return tree.findtext(tag)\n else:\n return tree.findtext(\"description\")\n\n\ndef get_xml_content_list(response):\n \"\"\"If the response is successful it returns a list of the text held\n within all of the elements. If not the response status is returned\"\"\"\n\n tree = ET.fromstring(response)\n tag_list = []\n\n if tree.attrib[\"responseStatus\"] == \"success\":\n for object_id in tree.itertext():\n object_id = object_id.strip()\n if object_id:\n tag_list.append(object_id)\n return tag_list\n else:\n return tree.attrib[\"responseStatus\"]\n\n\ndef close_smart_email_connection(session, token, message_id=\"\", segment_id=\"\", template_id=\"\"):\n \"\"\"if any of the ID's have a value a GET or DELETE request to\n delete them based on this ID is sent. \"\"\"\n\n if message_id:\n url = \"%s/message/deleteMessage/%s/%s\" % (session, token, message_id)\n requests.get(url)\n if segment_id:\n url = \"%s//segmentationservice/%s/segment/%s\" % (session, token, segment_id)\n requests.delete(url)\n if template_id:\n url = \"%s/template/delete/%s/%s\" % (session, token, message_id)\n requests.get(url)\n\n url = \"%s/connect/close/%s\" % (session, token)\n requests.get(url)\n\n\ndef base_64_encode(string, prefix=\"\"):\n \"\"\"will base64 encode 'string' and optionally return it\n concatenated with a prefix if supplied. Used for some tokens\"\"\"\n\n if prefix:\n return \"%s %s\" % (prefix, base64.b64encode(string))\n else:\n return base64.b64encode(string)\n", "sub_path": "Emails/API/Utils/api_functions.py", "file_name": "api_functions.py", "file_ext": "py", "file_size_in_byte": 1960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.codes", "line_number": 6, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 13, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 13, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 26, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 26, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 54, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 62, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "262130604", "text": "import collections\n\nimport cv2\nimport numpy as np\n\n\nclass Line:\n \"\"\"\n Represents a lane line on a road surface.\n \"\"\"\n\n def __init__(self, hyperparameters, buffer_len=10):\n self.hyperparameters = hyperparameters\n\n # was the line detected in the last iteration?\n self.detected = False\n\n # polynomial coefficients averaged over the last n iterations\n self.best_fit_coeffs = collections.deque(maxlen=2 * buffer_len)\n\n # x values of the last n fits of the line\n self.recent_xfitted = [1, 1, 1]\n\n # average x values of the fitted line over the last n iterations\n self.bestx = [1, 1, 1]\n\n # polynomial coefficients for the most recent fit\n self.current_fit_coeffs = [np.array([False])]\n\n # distance in meters of vehicle center from the line\n self.line_base_pos = 
None\n\n # difference in fit coefficients between last and new fits\n self.diffs = np.array([0, 0, 0], dtype='float')\n\n # x values for detected line pixels\n self.allx = None\n # y values for detected line pixels\n self.ally = None\n\n def reset(self):\n \"\"\"\n Reset the line parameters to default values.\n \"\"\"\n self.detected = False\n self.best_fit_coeffs = collections.deque(maxlen=2 * 10)\n self.recent_xfitted = [1, 1, 1]\n self.bestx = [1, 1, 1]\n self.current_fit_coeffs = [np.array([False])]\n self.line_base_pos = None\n self.diffs = np.array([0, 0, 0], dtype='float')\n self.allx = None\n self.ally = None\n\n def update(self, coeffs, is_detected=True, clear_buffer=False):\n \"\"\"\n Update Line with new fitted coefficients.\n\n :param coeffs: new polynomial coefficients (in pixels)\n :param is_detected: if the Line was detected or inferred\n :param clear_buffer: if True, reset state\n :return: None\n \"\"\"\n self.detected = is_detected\n\n if not self.detected:\n self.reset()\n\n if clear_buffer:\n self.recent_xfitted = []\n\n self.current_fit_coeffs = coeffs\n self.best_fit_coeffs.append(self.current_fit_coeffs)\n self.recent_xfitted.append(self.current_fit_coeffs)\n self.bestx = np.average(self.recent_xfitted, axis=0)\n\n def radius_of_curvature(self, coeffs, ploty):\n \"\"\"\n Radius of curve = ( (1 + (2Ay + B)^2)^(3/2) ) / |2A|\n\n :param coeffs: polynomial coefficients\n :param ploty: y parameters for plotting\n :return: radius\n \"\"\"\n A = coeffs[0]\n B = coeffs[1]\n y = np.max(ploty) * self.hyperparameters.lane().metres_per_pixel_y\n\n r_curve = ((1 + (2 * A * y + B) ** 2) ** (3 / 2)) / np.absolute(2 * A)\n return r_curve\n\n @property\n def average_fit(self):\n \"\"\"\n :return: average of polynomial coefficients of the last N iterations\n \"\"\"\n return np.mean(self.best_fit_coeffs, axis=0)\n\n def draw(self, mask, color=(255, 0, 0), line_width=50, average=False):\n \"\"\"\n Draw the line on a color mask image.\n \"\"\"\n h, w, c = mask.shape\n\n plot_y = np.linspace(0, h - 1, h)\n coeffs = self.average_fit if average else self.current_fit_coeffs\n\n line_center = coeffs[0] * plot_y ** 2 + coeffs[1] * plot_y + coeffs[2]\n line_left_side = line_center - line_width // 2\n line_right_side = line_center + line_width // 2\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array(list(zip(line_left_side, plot_y)))\n pts_right = np.array(np.flipud(list(zip(line_right_side, plot_y))))\n pts = np.vstack([pts_left, pts_right])\n\n # Draw the lane onto the warped blank image\n return cv2.fillPoly(mask, [np.int32(pts)], color)\n", "sub_path": "lane_finding/model/line.py", "file_name": "line.py", "file_ext": "py", "file_size_in_byte": 3836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.deque", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 97, "usage_type": 
"call"}, {"api_name": "numpy.linspace", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "240337293", "text": "# -*- encoding:utf-8 -*-\n\nimport time\nimport os\nfrom openpyxl import Workbook\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\n\ndef make_excel(dataList):\n \"\"\"\n :호출예시 make_excel([ [1,2,3,4], [5,6,7,8] ]) or make_excel(2dArray)\n :param dataList: [ data1, data2, data3, data4 ] 꼴의 1차원 list를 가지는 2차원 list\n :return: 없\n \"\"\"\n # === CONFIG\n FILENAME = \"엔카.xlsx\"\n\n # === SAVE EXCEL\n wb = Workbook()\n ws1 = wb.worksheets[0]\n header1 = ['제조사', '모델', '세부모델', '등급']\n ws1.column_dimensions['A'].width = 30\n ws1.column_dimensions['B'].width = 30\n ws1.column_dimensions['C'].width = 50\n ws1.column_dimensions['D'].width = 50\n ws1.append(header1)\n # data save\n\n for data in dataList:\n ws1.append(data)\n # end\n wb.save(FILENAME)\n\n\ndef make_excel_manufacturer(dataList, name):\n \"\"\"\n :호출예시 make_excel([ [1,2,3,4], [5,6,7,8] ]) or make_excel(2dArray)\n :param dataList: [ data1, data2, data3, data4 ] 꼴의 1차원 list를 가지는 2차원 list\n :return: 없\n \"\"\"\n # === CONFIG\n FILENAME = \"엔카_\" + name + \".xlsx\"\n\n # === SAVE EXCEL\n wb = Workbook()\n ws1 = wb.worksheets[0]\n header1 = ['제조사', '모델', '세부모델', '등급']\n ws1.column_dimensions['A'].width = 30\n ws1.column_dimensions['B'].width = 30\n ws1.column_dimensions['C'].width = 50\n ws1.column_dimensions['D'].width = 50\n ws1.append(header1)\n # data save\n\n for data in dataList:\n ws1.append(data)\n # end\n wb.save(FILENAME)\n\n\ndef chk_loading():\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n style_attr = bs4.find('div', class_='case_loading').get('style')\n if style_attr == 'display:none' or style_attr == 'display: none;':\n return True\n else:\n return False\n\ndef wait_loading():\n while not chk_loading():\n time.sleep(0.2)\n\n\nif __name__ == \"__main__\":\n # ========= BETA SETTING\n \"\"\"\n now = 1532504559.5943735\n terminTime = now + 60 * 60 * 3\n print(\"체험판 만료기간 : \", time.ctime(terminTime))\n if time.time() > terminTime:\n print('만료되었습니다.')\n exit(-1)\n \"\"\"\n\n # =========\n # SETTING\n setting_list = []\n dir_name_dom = \"\"\n dir_name_imp = \"\"\n now_idx = []\n try:\n setting_file = open('setting.ini', 'r', encoding='utf-8')\n setting_file.fileno()\n setting_list = setting_file.readlines()\n print(\"[COMPLETE] Setting File 확인\")\n\n temp = 0\n for i in setting_list:\n # print(temp, ': ', end='')\n # print(i)\n temp += 1\n\n # DIRECTORY\n dir_name_dom = setting_list[1].split(':')[1].strip()\n dir_name_imp = setting_list[2].split(':')[1].strip()\n\n # NOW INDEX\n for i in setting_list[5].split('/'):\n now_idx.append(int(i.strip()))\n\n setting_file.close()\n except FileNotFoundError:\n print(\"[ERROR] Setting File 확인 실패\")\n exit()\n\n # DRIVER INITIATE\n driver = webdriver.Chrome('chromedriver.exe')\n driver.maximize_window()\n\n # VARIABLE\n url_list = [\n 'http://www.encar.com/dc/dc_carsearchlist.do?carType=kor&searchType=model&TG.R=A#!',\n 'http://www.encar.com/fc/fc_carsearchlist.do?carType=for&searchType=model&TG.R=B#!'\n ]\n\n url_pivot = now_idx[0]\n 
depth1_pivot = now_idx[1]\n depth2_pivot = -1\n depth3_pivot = -1\n\n depth1 = \"\"\n depth2 = \"\"\n depth3 = \"\"\n depth4 = \"\"\n\n result = []\n result_temp = []\n\n cancel_depth1 = '//*[@id=\"schModelstep\"]/div/p/input'\n cancel_depth2 = '//*[@id=\"schModelstep\"]/div/p[2]/input'\n cancel_depth3 = '//*[@id=\"schModelstep\"]/div/p[3]/input'\n\n current_path = os.getcwd()\n # =========\n # STEP 0 : Directory 생성\n try:\n if not os.path.isdir('./' + dir_name_dom):\n os.mkdir('./' + dir_name_dom)\n\n if not os.path.isdir('./' + dir_name_imp):\n os.mkdir('./' + dir_name_imp)\n print(\"[COMPLETE] Directory 생성 완료\")\n except:\n print(\"[ERROR] Directory 생성 실패\")\n exit()\n\n # STEP 1.0 : 국산차 url, 수입차 url 이동\n while url_pivot < 2:\n\n # STEP 1.0.0 : 현재 Directory 이동\n if url_pivot == 0:\n os.chdir(current_path + '/' + dir_name_dom)\n elif url_pivot == 1:\n os.chdir(current_path + '/' + dir_name_imp)\n\n driver.get(url_list[url_pivot])\n time.sleep(0.5)\n\n # STEP 1.1 : 제조사 개수 확인 [ Depth 1 ]\n depth1_cnt = -1\n try:\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n if url_pivot == 0:\n depth1_cnt = len(bs4.find('div', id='stepManufact').find_all('dd'))\n elif url_pivot == 1:\n depth1_cnt = len(bs4.find('div', id='stepManufact').find('dl', class_='deplist sort_lista').find_all('dd'))\n except:\n print(\"[ERROR] 제조사 항목 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n\n # STEP 1.2 : 제조사 선택 [ Depth 1 ]\n while depth1_pivot <= depth1_cnt:\n result_temp.clear()\n depth1_dd_x_path = \"\"\n try:\n if url_pivot == 0:\n depth1_dd_x_path = '//*[@id=\"stepManufact\"]/dl/dd[{}]'.format(depth1_pivot)\n elif url_pivot == 1:\n depth1_dd_x_path = '//*[@id=\"stepManufact\"]/dl[2]/dd[{}]'.format(depth1_pivot)\n driver.find_element_by_xpath(depth1_dd_x_path).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 제조사 항목 선택 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n\n wait_loading()\n\n # STEP 1.3 : 모델 개수 확인 [ Depth 2 ]\n while True:\n try:\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n temp_depth2 = bs4.find('div', id='stepModel').find('dl', class_='deplist sort_lista')\n case_depth2 = 0\n if not temp_depth2:\n temp_depth2 = bs4.find('div', id='stepModel').find('dl', class_='deplist sort_titnon')\n case_depth2 = 1\n depth2_cnt = len(temp_depth2.find_all('dd'))\n except:\n print(\"[ERROR] 모델 항목 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n\n # STEP 1.4 : 모델 선택 [ Depth 2 ]\n depth2_pivot = 1\n while depth2_pivot <= depth2_cnt:\n depth2_dd_x_path = \"\"\n try:\n if case_depth2 == 0:\n depth2_dd_x_path = '//*[@id=\"stepModel\"]/dl[2]/dd[{}]'.format(depth2_pivot)\n elif case_depth2 == 1:\n depth2_dd_x_path = '//*[@id=\"stepModel\"]/dl/dd[{}]'.format(depth2_pivot)\n driver.find_element_by_xpath(depth2_dd_x_path).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 모델 항목 선택 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n\n wait_loading()\n\n # STEP 1.5 : 세부 모델 개수 확인 [ Depth 3 ]\n while True:\n try:\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n depth3_cnt = len(bs4.find('div', id='stepDeModel').find_all('dd'))\n except:\n print(\"[ERROR] 세부 모델 항목 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n\n # STEP 1.6 : 세부 모델 선택 [ Depth 3 ]\n depth3_pivot = 1\n while depth3_pivot 
<= depth3_cnt:\n try:\n depth3_dd_x_path = '//*[@id=\"stepDeModel\"]/dl/dd[{}]'.format(depth3_pivot)\n driver.find_element_by_xpath(depth3_dd_x_path).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 세부 모델 항목 선택 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n\n wait_loading()\n\n # STEP 1.7 : 제조사, 모델, 세부 모델 저장 [ Depth 1, Depth 2, Depth 3 ]\n while True:\n try:\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n depth1 = bs4.find('p', class_='choitem step1').find('strong').get_text()\n depth2 = bs4.find('p', class_='choitem step2').find('strong').get_text()\n depth3 = bs4.find('p', class_='choitem step3').find('strong').get_text()\n except:\n print(\"[ERROR] 제조사, 모델, 세부 모델 항목 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n\n # STEP 1.8 : 등급 LIST 확인 [ Depth 4 ]\n while True:\n try:\n depth4_dd_list = []\n bs4 = BeautifulSoup(driver.page_source, 'lxml')\n temp = bs4.find('div', id='stepGardeSet')\n if temp != None:\n depth4_dd_list = temp.find_all('dd')\n else:\n break\n except:\n print(\"[ERROR] 등급 항목 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n\n # STEP 1.9 : 등급 저장 [ Depth 4 ]\n temp_result = []\n if len(depth4_dd_list) != 0:\n for depth4_dd in depth4_dd_list:\n temp_result = []\n depth4 = depth4_dd.find('label').get_text()\n\n # STEP 1.10.1 : RESULT LIST에 결과 저장 [ Depth 1, Depth 2, Depth 3, Depth 4 ] (일반 항목)\n temp_result.append(depth1)\n temp_result.append(depth2)\n temp_result.append(depth3)\n temp_result.append(depth4)\n result.append(temp_result)\n result_temp.append(temp_result)\n print(temp_result)\n else:\n # STEP 1.10.2 : RESULT LIST에 결과 저장 [ Depth 1, Depth 2, Depth 3, Depth 4 ] (기타 항목)\n depth4 = \"\"\n temp_result.append(depth1)\n temp_result.append(depth2)\n temp_result.append(depth3)\n temp_result.append(depth4)\n result.append(temp_result)\n result_temp.append(temp_result)\n print(temp_result)\n\n # STEP 1.10 : 세부 모델 취소\n while True:\n try:\n driver.find_element_by_xpath(cancel_depth3).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 세부 모델 취소 버튼 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n wait_loading()\n depth3_pivot += 1\n\n # STEP 1.11 : 모델 취소\n while True:\n try:\n driver.find_element_by_xpath(cancel_depth2).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 모델 취소 버튼 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n wait_loading()\n depth2_pivot += 1\n\n # STEP 1.12 : 제조사 취소\n while True:\n try:\n driver.find_element_by_xpath(cancel_depth1).click()\n time.sleep(0.2)\n except:\n print(\"[ERROR] 제조사 취소 버튼 인식 실패\")\n driver.implicitly_wait(2)\n time.sleep(1)\n print(\"......\")\n print(\"[REPAIR] 다시 시도\")\n continue\n break\n wait_loading()\n depth1_pivot += 1\n\n # STEP 1.13 : 제조사별 Excel 생성\n make_excel_manufacturer(result_temp, depth1)\n url_pivot += 1\n depth1_pivot = 1\n\n # STEP 1.14 : driver 종료\n driver.quit()\n\n # STEP 2 : Excel 생성\n os.chdir(current_path)\n make_excel(result)\n", "sub_path": "encar_crawling.py", "file_name": "encar_crawling.py", "file_ext": "py", "file_size_in_byte": 14570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "openpyxl.Workbook", "line_number": 19, "usage_type": "call"}, {"api_name": 
"openpyxl.Workbook", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 117, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 117, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 151, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 162, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 164, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 167, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 172, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 174, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 176, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 199, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 209, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 210, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 213, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 219, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 239, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 249, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 250, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 254, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 270, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 280, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 281, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 282, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 283, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 287, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 297, "usage_type": "call"}, {"api_name": "bs4.find", "line_number": 298, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 306, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 342, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 346, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 358, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 362, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 374, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 378, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 395, "usage_type": "call"}]} +{"seq_id": "8952612", "text": "from math import floor\nfrom types import GeneratorType\nfrom lxml import etree\n\nfrom ..settings import VIDEO_ATTRS\nfrom 
..utils import INT_TYPES\nfrom .base import Formatter, assert_\nfrom .simple import Sitemap\n\n\nclass VideoFormatter(Formatter):\n\n @classmethod\n def live(cls, value):\n return cls.format_bool(value)\n\n @classmethod\n def family_friendly(cls, value):\n return cls.format_bool(value)\n\n @classmethod\n def requires_subscription(cls, value):\n return cls.format_bool(value)\n\n @classmethod\n def publication_date(cls, value):\n return cls.format_datetime(value)\n\n @classmethod\n def expiration_date(cls, value):\n return cls.format_datetime(value)\n\n def publication(self, value):\n for tag in ('name', 'language'):\n tagelem = etree.Element(self.builder.ns_format(tag, 'news'))\n tagelem.text = value[tag]\n yield tagelem\n\n @staticmethod\n def duration(value):\n value = int(value)\n assert_(28800 > value > 0, 'Duration %s invalid, must be less than 8hrs (28800 seconds)', value)\n return str(value)\n\n @staticmethod\n def rating(value):\n if isinstance(value, INT_TYPES):\n value = floor(value * 10) / 10\n assert_(5. >= value >= 0., 'Rating %s invalid, must be between 0 and 5', value)\n return str(value)\n\n @staticmethod\n def view_count(value):\n return str(int(value))\n\n def restriction(self, value):\n relationship, countries = value\n tagelem = etree.Element(self.builder.ns_format('restriction', 'video'),\n relationship=relationship)\n tagelem.text = countries\n yield tagelem\n\n def gallery_loc(self, value):\n try:\n url, title = value\n attrs = {'title': title}\n except ValueError:\n url, attrs = value, {}\n tagelem = etree.Element(self.builder.ns_format('gallery_loc', 'video'), **attrs)\n tagelem.text = title\n yield tagelem\n\n def prices(self, value):\n for attrs in value:\n amount = attrs.pop('value')\n tagelem = etree.Element(self.builder.ns_format('price', 'video'), **attrs)\n tagelem.text = str(amount)\n yield tagelem\n\n def uploader(self, value):\n # TODO: domain must match site\n try:\n name, url = value\n attrs = {'info': url}\n except ValueError:\n name, attrs = value, {}\n tagelem = etree.Element(self.builder.ns_format('uploader', 'video'), **attrs)\n tagelem.text = name\n yield tagelem\n\n\nclass VideoSitemap(Sitemap):\n formatter_class = VideoFormatter\n nsmap = {\n None: 'http://www.sitemaps.org/schemas/sitemap/0.9',\n 'video': 'http://www.google.com/schemas/sitemap-video/1.1'\n }\n\n def render_obj(self, obj):\n elem = super(VideoSitemap, self).render_obj(obj)\n videoelem = etree.SubElement(elem, self.ns_format('video', 'video'), nsmap=self.nsmap)\n for attr in VIDEO_ATTRS:\n value = self._get(attr, obj)\n if value is None:\n continue\n if hasattr(self.formatter, attr):\n value = getattr(self.formatter, attr)(value)\n if isinstance(value, GeneratorType):\n for ele in value:\n videoelem.append(ele)\n continue\n subelem = etree.SubElement(videoelem, self.ns_format(attr, 'video'))\n subelem.text = value\n return elem\n", "sub_path": "sitemapext/builder/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 3549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "base.Formatter", "line_number": 11, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 35, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 35, "usage_type": "name"}, {"api_name": "base.assert_", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.INT_TYPES", "line_number": 47, "usage_type": "argument"}, {"api_name": "math.floor", "line_number": 48, "usage_type": "call"}, 
{"api_name": "base.assert_", "line_number": 49, "usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 58, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 58, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 69, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 69, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 76, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 76, "usage_type": "name"}, {"api_name": "lxml.etree.Element", "line_number": 87, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 87, "usage_type": "name"}, {"api_name": "simple.Sitemap", "line_number": 92, "usage_type": "name"}, {"api_name": "lxml.etree.SubElement", "line_number": 101, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 101, "usage_type": "name"}, {"api_name": "settings.VIDEO_ATTRS", "line_number": 102, "usage_type": "name"}, {"api_name": "types.GeneratorType", "line_number": 108, "usage_type": "argument"}, {"api_name": "lxml.etree.SubElement", "line_number": 112, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "650601599", "text": "# Import standart libraries\nimport os\nfrom flask import Flask, jsonify, request\nfrom google.cloud import vision\nfrom firebase_admin import firestore\n\n# Initialize Flask Application\napp = Flask(__name__)\n\n# API that returns JSON as a result after processing an image\n@app.route(\"/analyze\", methods=[\"GET\", \"POST\"])\ndef analyze():\n # Creates AutoVision client\n visionClient = vision.ImageAnnotatorClient()\n\n # Receive the uri and file name from url\n uri = request.values.get(\"uri\")\n fileName = request.values.get(\"fileName\")\n\n # Analyze the image from bucket with AutoVision\n result = visionClient.label_detection(image=vision.Image(source=vision.ImageSource(image_uri=uri+fileName)))\n labels = []\n for label in result.label_annotations:\n labels.append(label.description)\n\n # Check whether the image containing a 'food' label\n filters = [\"Food\", \"Recipe\", \"Ingredient\"]\n for filter in filters:\n if filter in labels:\n return jsonify({\n \"status\": 200,\n \"message\": {\n \"imagePath\": fileName,\n \"information\": labels\n }\n }), 200\n else:\n return jsonify({\n \"status\": 200,\n \"message\": {\n \"imagePath\": fileName,\n \"information\": [\"This is not an image containing either Food, Recipe, or Ingredient\"]\n }\n }), 200\n\n# API that returns a list of the image along with information about the image from Firestore\n@app.route(\"/databasequery\", methods=[\"GET\"])\ndef databaseQuery():\n # Creates a Firestore client\n firestoreClient = firestore.Client()\n # Create a reference to the 'images' collection\n collectionRef = firestoreClient.collection(u\"images\")\n\n # Query all the document listed in the 'images' collection\n queryResult = []\n for doc in collectionRef.stream():\n docContent = []\n for field, info in doc.to_dict().items():\n docContent.append({field: info})\n\n queryResult.append(\n {\n \"docId\": doc.id,\n \"docContent\": docContent\n }\n )\n\n return jsonify({\n \"status\": 200,\n \"message\": queryResult\n })\n\n# API that returns a specific image along with information about the image from Firestore\n@app.route(\"/query\", methods=[\"GET\"])\ndef query():\n # Recieve the image file name from url\n image = request.values.get(\"image\")\n # Creates a Firestore client\n firestoreClient = firestore.Client()\n # Create a reference to 
the 'images' collection and find specific image\n documentRef = firestoreClient.collection(u\"images\").document(image)\n # Get the image data\n doc = documentRef.get()\n if doc.exists:\n return jsonify({\n \"status\": 200,\n \"message\": doc.to_dict()\n }), 200\n\nport = int(os.environ.get(\"PORT\", 8080))\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=port)\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "google.cloud.vision.ImageAnnotatorClient", "line_number": 14, "usage_type": "call"}, {"api_name": "google.cloud.vision", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "google.cloud.vision.Image", "line_number": 21, "usage_type": "call"}, {"api_name": "google.cloud.vision", "line_number": 21, "usage_type": "name"}, {"api_name": "google.cloud.vision.ImageSource", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 38, "usage_type": "call"}, {"api_name": "firebase_admin.firestore.Client", "line_number": 50, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "firebase_admin.firestore.Client", "line_number": 79, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 85, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 90, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "642423114", "text": "import rospy\nimport tf\nfrom geometry_msgs.msg import Point\n\nclass Analysis():\n def __init__(self):\n rospy.init_node('analysis', anonymous=False)\n\n rospy.on_shutdown(self.shutdown)\n\n self.tf_listener = tf.TransformListener()\n self.tf_listener.waitForTransform(\"/map\", \"/base_link\", rospy.Time(), rospy.Duration(5.0))\n\n def run(self):\n while not rospy.is_shutdown():\n self.position = self.get_position()\n self.x = self.position.x\n self.y = self.position.y\n rospy.loginfo(\"robot at x:%.3f,y:%.3f\" %(self.x, self.y))\n\n\n def get_position(self):\n # Get the current transform between the map and base_link frames\n try:\n # self.tf_listener.lookupTransform(\"/map\", \"/base_link\", rospy.Time(0), transform)\n (transform, rotation) = self.tf_listener.lookupTransform(\"/map\", \"/base_link\", rospy.Time(0))\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"TF 
Exception\")\n return\n return Point(*transform)\n\n def shutdown(self):\n rospy.loginfo(\"Stopping the robot...\")\n rospy.sleep(1)\n \nif __name__ == '__main__':\n test = Analysis()\n rospy.spin()", "sub_path": "tourguide/scripts/analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "rospy.init_node", "line_number": 7, "usage_type": "call"}, {"api_name": "rospy.on_shutdown", "line_number": 9, "usage_type": "call"}, {"api_name": "tf.TransformListener", "line_number": 11, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 12, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 12, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 15, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 19, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 26, "usage_type": "call"}, {"api_name": "tf.Exception", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tf.ConnectivityException", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tf.LookupException", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 28, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Point", "line_number": 30, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 33, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "581532049", "text": "import numpy as np\nfrom theano.tensor import as_tensor_variable\nfrom ContinuousTimeMarkovModel.distributions import *\nfrom pymc3 import Model, sample, Metropolis, Dirichlet, Potential, Binomial, Beta, Slice, NUTS\nimport theano.tensor as TT\nfrom ContinuousTimeMarkovModel.samplers.forwardS import *\nfrom ContinuousTimeMarkovModel.samplers.forwardX import *\n\n#import sys; sys.setrecursionlimit(50000)\n#theano.config.compute_test_value = 'off'\n\nN = 100 # Number of patients\nM = 6 # Number of hidden states\nK = 10 # Number of comorbidities\nD = 721 # Number of claims\nDd = 80 # Maximum number of claims that can occur at once\nmin_obs = 10 # Minimum number of observed claims per patient\nmax_obs = 30 # Maximum number of observed claims per patient\n\n# Load pre-generated data\nfrom pickle import load\n\n\nS_start = load(open('../data/X_layer_100_patients/S.pkl', 'rb'))\n''' S_start[zeroIndices]\n[3, 0, 0, 4, 1, 0, 3, 4, 4, 2, 2, 4, 5, 2, 2, 2, 2, 0, 2, 1, 1, 0, 1, 0, 3, 4, 0, 0, 3, 4, 1, 5, 0, 5, 3, 0, 3, 2, 4, 1, 4, 5, 4, 0, 1, 1, 1, 2, 3, 0, 1, 3, 0, 2, 4, 2, 4, 3, 5, 0, 4, 0, 1, 4, 4, 0, 4, 1, 3, 2, 2, 0, 0, 2, 4, 4, 4, 5, 0, 2, 2, 0, 1, 2, 2, 3, 5, 3, 3, 4, 2, 2, 4, 3, 5, 5, 3, 2, 0, 3]\n'''\nX_start = load(open('../data/X_layer_100_patients/X.pkl', 'rb'))\nZ_start = load(open('../data/X_layer_100_patients/Z.pkl', 'rb'))\nL_start = load(open('../data/X_layer_100_patients/L.pkl', 'rb'))\nobs_jumps = load(open('../data/X_layer_100_patients/obs_jumps.pkl', 'rb'))\nT = load(open('../data/X_layer_100_patients/T.pkl', 'rb'))\nO = load(open('../data/X_layer_100_patients/O_input.pkl', 'rb'))\n\n'''\nT = load(open('../data/synthetic2000/T.pkl', 'rb'))\nobs_jumps = load(open('../data/synthetic2000/obs_jumps.pkl', 'rb'))\nS_start = load(open('../data/synthetic2000/S.pkl', 'rb'))\nX_start = load(open('../data/synthetic2000/X.pkl', 'rb'))\nZ_start = 
load(open('../data/synthetic2000/Z.pkl', 'rb'))\nL_start = load(open('../data/synthetic2000/L.pkl', 'rb'))\nO = load(open('../data/synthetic2000/O_input.pkl', 'rb'))\n\n\nT = load(open('../data/small_model/data/T.pkl', 'rb'))\nobs_jumps = load(open('../data/small_model/data/obs_jumps.pkl', 'rb'))\nS_start = load(open('../data/small_model/data/S.pkl', 'rb'))\nX_start = load(open('../data/small_model/data/X.pkl', 'rb'))\nZ_start = load(open('../data/small_model/data/Z.pkl', 'rb'))\nL_start = load(open('../data/small_model/data/L.pkl', 'rb'))\nO = load(open('../data/small_model/data/O_input.pkl', 'rb'))\n'''\n\n#DES: nObs is total number of observations\nnObs = T.sum()\n#compress n and t indices\n# S is (nObs) vector\nS_start = np.concatenate([S_start[i,0:T[i]] for i in range(N)])\n# add 0 to start for intial steps\nobs_jumps = np.hstack([np.zeros((N,1),dtype='int8'),obs_jumps])\nobs_jumps = np.concatenate([obs_jumps[i,0:T[i]] for i in range(N)])\n# X is now (nObs,K)\nX_start = np.concatenate([X_start[:,0:T[i],i].T for i in range(N)])\n# O is now (nObs, Dd)\n# TODO: implement this with sparse matrices\nO = np.concatenate([O[:,0:T[i],i].T for i in range(N)])\n\n#import pdb; pdb.set_trace()\n\n\nmodel = Model()\nwith model:\n #Fails: #pi = Dirichlet('pi', a = as_tensor_variable([0.147026,0.102571,0.239819,0.188710,0.267137,0.054738]), shape=M, testval = np.ones(M)/float(M))\n pi = Dirichlet('pi', a = as_tensor_variable([0.147026,0.102571,0.239819,0.188710,0.267137,0.054738]), shape=M)\n pi_min_potential = Potential('pi_min_potential', TT.switch(TT.min(pi) < .001, -np.inf, 0))\n\n Q = DiscreteObsMJP_unif_prior('Q', M=M, lower=0.0, upper=1.0, shape=(M,M))\n \n #S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs), testval=np.ones(nObs,dtype='int32'))\n S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs))\n\n #B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))\n #B = Beta('B', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))\n B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M))\n B = Beta('B', alpha = 1., beta = 1., shape=(K,M))\n\n #X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K), testval=np.ones((nObs,K),dtype='int8'))\n X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K))\n\n #Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D), testval=0.5*np.ones((K,D)))\n #L = Beta('L', alpha = 1., beta = 1., shape=D, testval=0.5*np.ones(D))\n Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D))\n L = Beta('L', alpha = 1., beta = 1., shape=D)\n O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, O_input=O, shape=(nObs,Dd), observed=O)\n #O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, max_obs=max_obs, O_input=O, shape=(Dd,max_obs,N), observed=O)\n#import pdb; pdb.set_trace()\n\nimport scipy.special\nQ_raw_log = scipy.special.logit(np.array([0.631921, 0.229485, 0.450538, 0.206042, 0.609582]))\n\nfrom scipy.special import logit\n\nB_lo = 
logit(np.array([\n[0.000001,0.760000,0.720000,0.570000,0.700000,0.610000],\n[0.000001,0.460000,0.390000,0.220000,0.200000,0.140000],\n[0.000001,0.620000,0.620000,0.440000,0.390000,0.240000],\n[0.000001,0.270000,0.210000,0.170000,0.190000,0.070000],\n[0.000001,0.490000,0.340000,0.220000,0.160000,0.090000],\n[0.000001,0.620000,0.340000,0.320000,0.240000,0.120000],\n[0.000001,0.550000,0.390000,0.320000,0.290000,0.150000],\n[0.000001,0.420000,0.240000,0.170000,0.170000,0.110000],\n[0.000001,0.310000,0.300000,0.230000,0.190000,0.110000],\n[0.000001,0.470000,0.340000,0.190000,0.190000,0.110000]]))\n\nB0_lo = logit(np.array([\n[0.410412,0.410412,0.418293,0.418293,0.429890,0.429890],\n[0.240983,0.240983,0.240983,0.240983,0.240983,0.240983],\n[0.339714,0.339714,0.339714,0.339714,0.339714,0.339714],\n[0.130415,0.130415,0.130415,0.130415,0.130415,0.130415],\n[0.143260,0.143260,0.143260,0.143260,0.143260,0.143260],\n[0.211465,0.211465,0.211465,0.211465,0.211465,0.211465],\n[0.194187,0.194187,0.194187,0.194187,0.194187,0.194187],\n[0.185422,0.185422,0.185422,0.185422,0.185422,0.185422],\n[0.171973,0.171973,0.171973,0.171973,0.171973,0.171973],\n[0.152277,0.152277,0.152277,0.152277,0.152277,0.152277]]))\n\n#DES Random inputs\nranSeed = 144\nnp.random.seed(ranSeed)\nL_start = np.random.rand(D)\nnp.random.seed(ranSeed+1)\nZ_start = np.random.rand(K,D)\nnp.random.seed(ranSeed+2)\nB_lo = logit(np.random.rand(K,M))\nnp.random.seed(ranSeed+3)\nB0_lo = logit(np.random.rand(K,M))\n\nZ_lo = logit(Z_start)\nL_lo = logit(L_start)\n#L_lo = np.ones_like(L_start)*-4.0\n'''\nQ_raw_log = np.log(np.array([[1, 0.0000001, 0.0000001, 0.0000001, 0.0000001], \n [0.0000001, 1, 0.0000001, 0.0000001, 0.0000001],\n [0.0000001, 0.0000001, 1, 0.0000001, 0.0000001],\n [0.0000001, 0.0000001, 0.0000001, 1, 0.0000001],\n [0.0000001, 0.0000001, 0.0000001, 0.0000001, 1],\n [0.0000001, 0.0000001, 0.0000001, 0.0000001, 0.0000001]]))\n'''\n\nstart = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo}\n#teststart = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo, 'pi_stickbreaking':np.ones(M)/float(M)}\n#start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_start}\n\nwith model:\n #import pdb; pdb.set_trace()\n\n steps = []\n steps.append(NUTS(vars=[pi]))\n #steps.append(NUTS(vars=[pi], scaling=np.ones(M-1)*0.058))\n #steps.append(Metropolis(vars=[pi], scaling=0.058, tune=False))\n steps.append(NUTS(vars=[Q],scaling=np.ones(M-1,dtype=float)*10.))\n #steps.append(Metropolis(vars=[Q], scaling=0.2, tune=False))\n steps.append(ForwardS(vars=[S], nObs=nObs, T=T, N=N, observed_jumps=obs_jumps))\n steps.append(NUTS(vars=[B0,B]))\n #steps.append(Metropolis(vars=[B0], scaling=0.2, tune=False))\n #steps.append(NUTS(vars=[B]))\n #steps.append(Metropolis(vars=[B], scaling=0.198, tune=False))\n steps.append(ForwardX(vars=[X], N=N, T=T, K=K, D=D,Dd=Dd, O=O, nObs=nObs))\n #steps.append(NUTS(vars=[Z], scaling=np.ones(K*D)))\n steps.append(Metropolis(vars=[Z], scaling=0.0132, tune=False))\n steps.append(NUTS(vars=[L],scaling=np.ones(D)))\n #steps.append(Metropolis(vars=[L],scaling=0.02, tune=False, ))\n\n## 22 minutes per step with all NUTS set\n\n #import pdb; pdb.set_trace()\n #model.dlogp()\n trace = sample(1001, steps, start=start, random_seed=111,progressbar=True)\n #trace = sample(11, steps, start=start, 
random_seed=111,progressbar=True)\n #trace = sample(11, steps, start=start, random_seed=[111,112,113],progressbar=False,njobs=3)\n\npi = trace[pi]\nQ = trace[Q]\nS = trace[S]\n#S0 = S[:,0] #now pibar\nB0 = trace[B0]\nB = trace[B]\nX = trace[X]\nZ = trace[Z]\nL = trace[L]\n#Sbin = np.vstack([np.bincount(S[i]) for i in range(len(S))])\nSbin = np.vstack([np.bincount(S[i],minlength=6)/float(len(S[i])) for i in range(len(S))])\nzeroIndices = np.roll(T.cumsum(),1)\nzeroIndices[0] = 0\npibar = np.vstack([np.bincount(S[i][zeroIndices],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])\npibar = np.vstack([np.bincount(S_start[zeroIndices],minlength=M)/float(zeroIndices.shape[0]),pibar])\nSEnd = np.vstack([np.bincount(S[i][zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])\nSEnd = np.vstack([np.bincount(S_start[zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]),SEnd])\n#logp = steps[1].logp\nlogp = steps[2].logp\nXlogp = steps[4].logp\nXChanges = np.insert(1-(1-(X[:,1:]-X[:,:-1])).prod(axis=2),0,0,axis=1)\nXChanges.T[zeroIndices] = 0\nXChanges[XChanges.nonzero()] = XChanges[XChanges.nonzero()]/XChanges[XChanges.nonzero()]\nXChanges = XChanges.sum(axis=1)/float(N)\nlogpTotal = [model.logp(trace[i]) for i in range(len(trace))]\n\n#np.set_printoptions(2);np.set_printoptions(linewidth=160)\n'''\nfor i in range(1001):\n print \"~~~\",i ,\"~~~\"\n print pi[i,:]\n print \"Bincount S0:\", np.bincount(S0[i,:],minlength=6)\n print \"\\n\"\n'''\n\n#from pickle import dump\n#with open('file.pkl','wb') as file:\n# dump(trace,file)\n", "sub_path": "examples/debug_example_main.py", "file_name": "debug_example_main.py", "file_ext": "py", "file_size_in_byte": 10078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 29, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 66, "usage_type": "call"}, {"api_name": "pymc3.Model", "line_number": 71, "usage_type": "call"}, {"api_name": "pymc3.Dirichlet", "line_number": 74, "usage_type": "call"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 74, "usage_type": "call"}, {"api_name": "pymc3.Potential", "line_number": 75, "usage_type": "call"}, {"api_name": "theano.tensor.switch", "line_number": 75, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 75, "usage_type": "name"}, {"api_name": "theano.tensor.min", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pymc3.Beta", "line_number": 84, "usage_type": "call"}, {"api_name": "pymc3.Beta", "line_number": 85, "usage_type": "call"}, {"api_name": "pymc3.Beta", "line_number": 92, 
"usage_type": "call"}, {"api_name": "pymc3.Beta", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.special.special.logit", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.special.special", "line_number": 99, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.special.logit", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "scipy.special.logit", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 133, "usage_type": "attribute"}, {"api_name": "scipy.special.logit", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "scipy.special.logit", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "scipy.special.logit", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.special.logit", "line_number": 139, "usage_type": "call"}, {"api_name": "pymc3.NUTS", "line_number": 158, "usage_type": "call"}, {"api_name": "pymc3.NUTS", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 161, "usage_type": "call"}, {"api_name": "pymc3.NUTS", "line_number": 164, "usage_type": "call"}, {"api_name": "pymc3.Metropolis", "line_number": 170, "usage_type": "call"}, {"api_name": "pymc3.NUTS", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 171, "usage_type": "call"}, {"api_name": "pymc3.sample", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 198, "usage_type": "call"}, 
{"api_name": "numpy.bincount", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "75517818", "text": "# coding:utf-8\nimport keras\nimport sys, os\nimport scipy\nimport scipy.misc\nimport numpy as np\nfrom keras.models import model_from_json\nfrom keras.preprocessing.image import load_img\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Input\nfrom keras.models import Model\nimport collections #kerasと関係ないです。\nimport json\nfrom keras.backend import tensorflow_backend as backend\nimport time as t\nimport re #正解判別\nstart_t=t.time()\n#画像サイズ\nimsize = (96, 96)\n#人数\npeople=9\n#認識したい画像のパスを指定する\n# ./blog_testpic/xxx.jpg といった指定を意味する\ntestpic = \"./Auth/\"\n#使用するモデルを指定する\nkeras_model = \"./model/face.json\"\nkeras_param = \"./model/face-model.h5\"\n#合格点(0~1まで。何点以上ならその人と判定するか\nPassScore=0.9\n#画像の読み込み\ndef get_file(dir_path):\n filenames = os.listdir(dir_path)\n return filenames\n\n\n\n\n#メイン開始\nif __name__ == \"__main__\":\n #画像を読み込んで、ファイル名をリスト化する。\n pic = get_file(testpic)\n print(pic)\n cnt=0 #正解数初期化\n #モデルの読み込み\n start_tj=t.time()\n model = model_from_json(open(keras_model).read())\n end_tj=t.time()\n\n\n start_tw=t.time()\n model.load_weights(keras_param)\n end_tw=t.time()\n #model.summary()\n with open(\"./model/categories.json\",'r') as fi: \n classes=json.load(fi)\n classes[\"?\"]=\"Unknown\"\n print(classes)\n ##��こまでで実行するとモデルの形が結果に表示される\n label_array=[]\n #リスト化したファイルから読み込んで処理する\n for i in pic:\n print(i) # ファイル名の出力\n \n #画像ディレクトリにあるファイルのi番目を読み込み\n img = load_img(testpic + i,target_size=(224,224))\n # 画像を要素に取る配列(images)にする必要がある\n images = np.array([np.array(img)])\n \n start_tp=t.time()\n prd = model.predict(images)\n end_tp=t.time()\n\n for j in range(people):\n print(classes[str(j)]+\"の確率->\"+\"{0:3.3f}\".format(prd[0][j]*100)+\"%\")\n \n #確信度最大値を取得する\n prelabel = prd.argmax(axis=1)\n if(prd.max()>PassScore):\n label=prelabel[0]\n else:\n label='?'\n label_array.append(label)\n #print([classes[c] for c in str(label)])\n print(classes[str(label)])\n if re.match(classes[str(label)], i):\n cnt+=1\n print(\"正解^-^\")\n else:\n print(\"不正解-_-\")\n print()\n print()\n backend.clear_session()\n end_t=t.time()\n print(\"*---結果---*\")\n print(\"CKFaceの解答=>\",str(label_array)) \n print(\"[DEBUG]正解数\" + str(cnt) + \"/\" + str(len(label_array)))\n print(\"[DEBUG]正答率\"+str(cnt/len(label_array)*100)+\"%\")\n countLabel=collections.Counter(label_array)\n result=countLabel.most_common(1)\n # print(\"あなたは\"+classes[str(result[0][0])]+\"さんです。\")\n\n \n \n print(\"【keras_auth.py】\"+\"{0:.5f}\".format(end_t-start_t)+\"秒\")\n print(\"【model_from_json】\"+\"{0:.5f}\".format(end_tj-start_tj)+\"秒\")\n print(\"【load_weights】\"+\"{0:.5f}\".format(end_tw-start_tw)+\"秒\")\n print(\"【predict】\"+\"{0:.5f}\".format(end_tp-start_tp)+\"秒\")\n\n\n", "sub_path": "keras_auth.py", "file_name": "keras_auth.py", "file_ext": "py", "file_size_in_byte": 3442, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.models.model_from_json", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "re.match", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.backend.tensorflow_backend.clear_session", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.backend.tensorflow_backend", "line_number": 92, "usage_type": "name"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "636303832", "text": "\"\"\"Run two sample T-test\n\nAuthors: Hao-Ting Wang\nDate: May 20, 2021\n\"\"\"\nimport os, sys\nfrom pathlib import Path\nimport pandas as pd\n\nfrom nilearn.glm.second_level import SecondLevelModel\nfrom nilearn.glm.thresholding import threshold_stats_img\nfrom nilearn.reporting import make_glm_report\n\n\nseed = sys.argv[1]\nmultiple_comparison = sys.argv[2]\n\nproject_path = Path(__file__).parents[2]\n\nalpha = {\n \"bonferroni\": 0.05,\n \"fpr\": 0.001\n}\n\ndiff_path = (project_path / \"results/subject_level\").glob(\n f\"sub-*/{seed}_typhoid_wrt_placebo_effect_size.nii.gz\"\n)\nresults_path = project_path / f\"results/group_level/{seed}_double_twosample_t\"\nreport_path = project_path / f\"results/{seed}_double_twosample_t.html\"\nreport_title = f\"{seed}: Patients vs Controls when typhoid > placebo\"\n\nif not results_path.exists():\n os.makedirs(results_path)\n\ngm_mask = str(project_path / \"atlas/custom_gm_mask.nii.gz\")\n\n\ndef group_level(input_imgs, design_matrix, contrasts, title, results_path):\n # gray matter mask to remove area with no BOLD signal\n group_level_model = SecondLevelModel(mask_img=gm_mask, smoothing_fwhm=0, verbose=1)\n print(\"fit parametric model\")\n group_level_model = group_level_model.fit(input_imgs, design_matrix=design_matrix)\n\n for con_name, con in contrasts.items():\n print(con_name)\n z_map = group_level_model.compute_contrast(con, output_type=\"z_score\")\n z_map.to_filename(str(results_path / f\"{con_name}_zstat.nii.gz\"))\n thresh_z, _ = threshold_stats_img(\n z_map,\n gm_mask,\n height_control=multiple_comparison,\n alpha=alpha[multiple_comparison],\n cluster_threshold=10,\n )\n thresh_z.to_filename(str(results_path / f\"{con_name}_thresh_zstat.nii.gz\"))\n return group_level_model\n\n\n# load first level effect maps\nsecond_level_input = pd.DataFrame()\nfor file in diff_path:\n effects_map_path = str(file)\n subject_label = str(file.parent).split(\"sub-\")[-1]\n df = pd.DataFrame(\n [subject_label, effects_map_path], index=[\"subject_label\", \"effects_map_path\"]\n ).T\n second_level_input = pd.concat([second_level_input, df], axis=0)\nsecond_level_input = second_level_input.set_index(\"subject_label\").sort_index()\n\n# create design matrix\ngroup_info = pd.read_csv(project_path / \"analysis/group_design.csv\", index_col=0)\ngroup_info.index = group_info.index.map(str)\ngroup_info = pd.concat([second_level_input, group_info], axis=1)\ngroup_info.to_csv(results_path / \"inputs.csv\")\ndesign_matrix = group_info[[\"Sex\", \"Age\", \"control\", \"patient\"]]\ndesign_matrix.to_csv(results_path / \"two-sample-t-test_design-matrix.csv\")\n\n# generate parametric 
report\ninput_imgs = group_info[\"effects_map_path\"].tolist()\nprint(\"generate parametric report\")\ndesign_matrix = group_info[[\"Sex\", \"Age\", \"control\", \"patient\"]]\ncontrasts = {\n \"typhoid_wrt_placebol\": [\n 0,\n 0,\n 1,\n 1,\n ],\n \"placebol_wrt_typhoid\": [\n 0,\n 0,\n -1,\n -1,\n ],\n \"control\": [\n 0,\n 0,\n 1,\n 0,\n ],\n \"patient\": [\n 0,\n 0,\n 0,\n 1,\n ],\n \"control_wrt_patient\": [0, 0, 1, -1],\n \"patient_wrt_control\": [0, 0, -1, 1],\n}\ngroup_level_model = group_level(\n input_imgs, design_matrix, contrasts, report_title, results_path\n)\n\nreport = make_glm_report(\n group_level_model,\n contrasts=contrasts,\n title=report_title,\n height_control=multiple_comparison,\n alpha=alpha[multiple_comparison],\n cluster_threshold=10,\n)\nreport.save_as_html(report_path)\n", "sub_path": "analysis/src/group_twosamplet_interaction.py", "file_name": "group_twosamplet_interaction.py", "file_ext": "py", "file_size_in_byte": 3581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 33, "usage_type": "call"}, {"api_name": "nilearn.glm.second_level.SecondLevelModel", "line_number": 40, "usage_type": "call"}, {"api_name": "nilearn.glm.thresholding.threshold_stats_img", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 73, "usage_type": "call"}, {"api_name": "nilearn.reporting.make_glm_report", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "410044478", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom Data.States import *\nfrom Dialogs.superadmin.Doctors.removeDoctor import *\nfrom Dialogs.messageBox import *\n\nclass selectDoctor(object):\n def __init__(self):\n self.last_city = ''\n self.hospital_list = []\n self.doctor_list = []\n self.last_hospital = ''\n def setup(self, selectDoctor):\n selectDoctor.setObjectName(\"selectDoctor\")\n selectDoctor.resize(380, 407)\n selectDoctor.setWindowTitle(\"\")\n self.title = QtWidgets.QLabel(selectDoctor)\n self.title.setGeometry(QtCore.QRect(30, 0, 331, 51))\n self.title.setObjectName(\"title\")\n self.frame = QtWidgets.QFrame(selectDoctor)\n self.frame.setGeometry(QtCore.QRect(10, 60, 351, 291))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.cityComboBox = QtWidgets.QComboBox(self.frame)\n self.cityComboBox.setGeometry(QtCore.QRect(160, 140, 161, 27))\n self.cityComboBox.setObjectName(\"cityComboBox\")\n self.doctorComboBox = QtWidgets.QComboBox(self.frame)\n self.doctorComboBox.setGeometry(QtCore.QRect(160, 240, 161, 27))\n self.doctorComboBox.setObjectName(\"doctorComboBox\")\n self.stateComboBox = QtWidgets.QComboBox(self.frame)\n self.stateComboBox.setGeometry(QtCore.QRect(160, 90, 161, 27))\n self.stateComboBox.setObjectName(\"stateComboBox\")\n self.cityLabel = 
QtWidgets.QLabel(self.frame)\n self.cityLabel.setGeometry(QtCore.QRect(40, 140, 111, 31))\n self.cityLabel.setObjectName(\"cityLabel\")\n self.bloodBankLabel = QtWidgets.QLabel(self.frame)\n self.bloodBankLabel.setGeometry(QtCore.QRect(40, 240, 111, 31))\n self.bloodBankLabel.setObjectName(\"bloodBankLabel\")\n self.stateLabel = QtWidgets.QLabel(self.frame)\n self.stateLabel.setGeometry(QtCore.QRect(40, 90, 111, 31))\n self.stateLabel.setObjectName(\"stateLabel\")\n self.ORLabel = QtWidgets.QLabel(self.frame)\n self.ORLabel.setGeometry(QtCore.QRect(130, 50, 111, 31))\n self.ORLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.ORLabel.setObjectName(\"ORLabel\")\n self.searchByID = QtWidgets.QLineEdit(self.frame)\n self.searchByID.setGeometry(QtCore.QRect(40, 20, 281, 27))\n self.searchByID.setObjectName(\"searchByID\")\n self.hospitalLabel = QtWidgets.QLabel(self.frame)\n self.hospitalLabel.setGeometry(QtCore.QRect(40, 190, 111, 31))\n self.hospitalLabel.setObjectName(\"hospitalLabel\")\n self.hospitalComboBox = QtWidgets.QComboBox(self.frame)\n self.hospitalComboBox.setGeometry(QtCore.QRect(160, 190, 161, 27))\n self.hospitalComboBox.setObjectName(\"hospitalComboBox\")\n self.selectButton = QtWidgets.QPushButton(selectDoctor)\n self.selectButton.setGeometry(QtCore.QRect(140, 360, 80, 28))\n self.selectButton.setObjectName(\"selectButton\")\n\n self.retranslateUi(selectDoctor)\n QtCore.QMetaObject.connectSlotsByName(selectDoctor)\n\n def retranslateUi(self, selectDoctor):\n _translate = QtCore.QCoreApplication.translate\n selectDoctor.setWindowTitle(_translate(\"selectDoctor\", \" \"))\n self.title.setText(_translate(\"selectDoctor\",\n \"
Select Doctor\"))\n        self.cityLabel.setText(_translate(\"selectDoctor\", \"City :\"))\n        self.bloodBankLabel.setText(_translate(\"selectDoctor\", \"Doctor :\"))\n        self.stateLabel.setText(_translate(\"selectDoctor\", \"State :\"))\n        self.ORLabel.setText(_translate(\"selectDoctor\", \"OR\"))\n        self.searchByID.setPlaceholderText(_translate(\"selectDoctor\", \"Search by Doctor ID\"))\n        self.hospitalLabel.setText(_translate(\"selectDoctor\", \"Hospital :
\"))\n self.selectButton.setText(_translate(\"selectDoctor\", \"SELECT\"))\n\n '''\n\n self.clickEvents(selectDoctor)\n\n def clickEvents(self, parent):\n self.stateAddFunction(selectDoctor)\n self.removeButton.clicked.connect(lambda: self.clickOnRemoveButton(parent))\n\n # Do Not Delete This Code Mrudul Add The Hospital ComboBox Here\n\n def clickOnRemoveButton(self, parent):\n id = self.searchByID.text()\n if id != \"\":\n if id.isdigit():\n import requests\n URL = \"https://mdtouch.herokuapp.com/api/doctor/\" + str(id)\n r = requests.get(url= URL)\n data = r.json()\n if data == {\"detail\": \"Not found.\"}:\n self.window = messageBox()\n self.window.infoBox(\"Id Does Not Exists\")\n self.searchByID.setText(\"\")\n return\n else:\n URL = \"https://mdtouch.herokuapp.com/api/hospital/\" + str(data[\"workplace\"])\n r = requests.get(url=URL)\n hdata = r.json()\n parent.close()\n self.window = QDialog()\n self.dialog =\n self.dialog.setup(self.window,data,hdata)\n self.window.setModal(True)\n self.window.show()\n return\n\n else:\n self.window = messageBox()\n self.window.infoBox(\"Id is a Integer\")\n self.searchByID.setText(\"\")\n return\n\n if self.doctorComboBox.count() == 0:\n self.window = messageBox()\n self.window.infoBox(\"Select the hospital first\")\n return\n doctor_name = self.doctorComboBox.currentText()\n doctorData = {}\n hdata = {}\n for i in self.doctor_list:\n if doctor_name == i[\"firstName\"] + \" \" + i[\"lastName\"]:\n doctorData = i\n break\n for i in self.hospital_list:\n if i[\"name\"] == self.hospitalComboBox.currentText():\n hdata = i\n break\n parent.close()\n self.window = QDialog()\n self.dialog = removeDoctor()\n self.dialog.setup(self.window,doctorData,hdata)\n self.window.setModal(True)\n self.window.show()\n\n\n def stateAddFunction(self,parent):\n for i in states.values():\n self.stateComboBox.addItem(i)\n for i in cities[\"Andhra Pradesh\"]:\n self.cityComboBox.addItem(i)\n self.stateComboBox.currentIndexChanged.connect(lambda : self.cityAddFunction(parent))\n self.stateComboBox.currentIndexChanged.connect(lambda :self.hospitalComboBoxAdd(parent))\n #self.stateComboBox.currentIndexChanged.connect(lambda : self.doctorComboBoxAdd(parent))\n\n def cityAddFunction(self,parent):\n state = self.stateComboBox.currentText()\n i = self.cityComboBox.count()\n flag = True\n while i > 0:\n flag = False\n self.cityComboBox.removeItem(0)\n i-=1\n flag = True\n for i in cities[state]:\n flag = False\n self.cityComboBox.addItem(i)\n flag = True\n self.cityComboBox.currentIndexChanged.connect(lambda :self.hospitalComboBoxAdd(parent))\n\n def hospitalComboBoxAdd(self,parent):\n if self.last_city == self.cityComboBox.currentText() or self.cityComboBox.count() != len(cities[self.stateComboBox.currentText()]) or self.cityComboBox.itemText(self.cityComboBox.count()-1) != cities[self.stateComboBox.currentText()][-1]:\n return\n self.last_city = self.cityComboBox.currentText()\n # First Erase all Hospitals\n i = self.hospitalComboBox.count()\n while i > 0:\n i -= 1\n self.hospitalComboBox.removeItem(0)\n\n import requests\n print(self.cityComboBox.currentText())\n URL = \"https://mdtouch.herokuapp.com/api/hospital/\"\n param ={\n \"city\": self.cityComboBox.currentText()\n }\n r = requests.get(url=URL,params=param)\n l = r.json()\n print(l)\n self.hospital_list = l\n for i in l:\n self.hospitalComboBox.addItem(str(i[\"name\"]))\n self.hospitalComboBox.currentIndexChanged.connect(lambda : self.doctorComboBoxAdd(parent))\n\n def doctorComboBoxAdd(self,parent):\n i = 
self.doctorComboBox.count()\n while i > 0:\n i -= 1\n self.doctorComboBox.removeItem(0)\n workplace_id = 0\n print(self.hospital_list)\n for i in self.hospital_list:\n if i[\"name\"] == self.hospitalComboBox.currentText():\n workplace_id = i[\"id\"]\n break\n if workplace_id == 0:\n return\n\n print(workplace_id)\n import requests\n URL = \"https://mdtouch.herokuapp.com/api/doctor/\"\n param = {\n \"workplace\" : int(workplace_id)\n }\n r = requests.get(url=URL,params=param)\n self.doctor_list = r.json()\n print(self.doctor_list)\n for i in self.doctor_list:\n self.doctorComboBox.addItem(str(i[\"firstName\"]) + \" \" + i[\"lastName\"])\n '''\n\n\n", "sub_path": "Dialogs/superadmin/Doctors/selectDoctor.py", "file_name": "selectDoctor.py", "file_ext": "py", "file_size_in_byte": 10005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 27, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 30, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 33, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 34, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 36, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", 
"line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 45, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 49, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 50, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 56, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 56, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 58, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "308732096", "text": "import asyncio\nimport json\nfrom logging import getLogger\nfrom threading import Thread\nfrom agv_data_processor import MessageProcessor, stop_message, create_dict_for_client\n\nUSE_UDP = True\nmessage_processor = MessageProcessor()\nlogger = getLogger(__name__)\n\n\nclass EchoUDPClientProtocol:\n def __init__(self, message, on_con_lost, set_alive=None):\n self.message = message\n self.on_con_lost = on_con_lost\n self.cache = \"\"\n self.transport = None\n self.set_alive = set_alive\n\n def connection_made(self, transport):\n self.transport = transport\n data_to_send = str(len(self.message)) + '\\n' + self.message\n logger.info('%s', data_to_send)\n self.transport.sendto(data_to_send.encode())\n\n def datagram_received(self, data, addr):\n if self.set_alive is not None:\n self.set_alive()\n decoded_data = data.decode()\n if len(decoded_data) > 500:\n logger.warning('ignoring too long message')\n return\n logger.info('received message of len %d: %s', len(decoded_data), decoded_data)\n try:\n results, self.cache = self.decode(decoded_data, self.cache)\n except:\n logger.exception('could not decode messgae')\n return\n parsed_res = create_dict_for_client(results[-1])\n self.message_processor.handle_command(parsed_res)\n logger.info(\"Close the socket\")\n self.transport.close()\n\n def 
error_received(self, exc):\n logger.exception('Error received:', exc)\n pass\n\n def connection_lost(self, exc):\n message_processor.handle_command(stop_message)\n logger.info(\"Connection closed\")\n self.on_con_lost.set_result(True)\n\n @staticmethod\n def decode(to_decode, client_cache):\n res = []\n str_to_decode = client_cache + to_decode\n cache = \"\"\n while len(str_to_decode) > 0:\n break_ind = str_to_decode.find(\"\\n\")\n str_len = int(str_to_decode[:break_ind])\n if str_len <= len(str_to_decode):\n s = str_to_decode[break_ind + 1:break_ind + 1 + str_len]\n try:\n r = json.loads(s)\n res.append(r)\n except ValueError:\n logger.exception('could not decode %s', s)\n str_to_decode = str_to_decode[break_ind + 1 + str_len:]\n else:\n cache = str_to_decode\n return res, cache\n\n\nasync def main(message_processor, host, port, set_alive):\n # Get a reference to the event loop as we plan to use\n # low-level APIs.\n loop = asyncio.get_running_loop()\n\n on_con_lost = loop.create_future()\n message = message_processor.agv_message()\n\n transport, protocol = await loop.create_datagram_endpoint(\n lambda: EchoUDPClientProtocol(message, on_con_lost, set_alive),\n remote_addr=(host, port))\n\n try:\n await on_con_lost\n finally:\n transport.close()\n\nclass UDPClient:\n def __init__(self, message_processor, host, port, set_alive):\n self.message_processor = message_processor\n self.host = host\n self.port = port\n self.set_alive = set_alive\n\n def main_async_server(self):\n asyncio.run(main(self.message_processor, self.host, self.port, self.set_alive))\n\n def run(self):\n t1 = Thread(target=self.main_async_server, args=(self, ))\n t1.start()", "sub_path": "udp_client.py", "file_name": "udp_client.py", "file_ext": "py", "file_size_in_byte": 3436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "agv_data_processor.MessageProcessor", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "agv_data_processor.create_dict_for_client", "line_number": 39, "usage_type": "call"}, {"api_name": "agv_data_processor.stop_message", "line_number": 49, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "asyncio.get_running_loop", "line_number": 77, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 99, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "312337519", "text": "import simpy\nimport functools\nimport random\nimport time\nfrom enum import Enum\n\n#ONU class\nclass Traffic(object):\n def __init__ (self, env, dist, line_rate):\n self.env = env\n self.dist = dist\n self.line_rate = line_rate\n self.requests = simpy.Store(self.env)\n self.action = self.env.process(self.run())\n self.generated = False\n #generate traffic\n def run(self):\n while True:\n #if self.generated == False:\n yield self.env.timeout(self.dist(self))\n req = Request(self.env, self.id, self.line_rate)\n self.requests.put(req)\n print(\"To aqui\")\n #else:\n #continue\n\n#ONU class\nclass ONU(Traffic):\n def __init__(self, env, onu_id, dist, line_rate):\n Traffic.__init__(self, env, dist, line_rate)\n self.onu_id = onu_id\n #self.action = self.env.process(self.run())\n\n #gets its request\n def run(self):\n r = yield self.requests.get()\n self.allocate(r)\n yield self.env.timeout(self.dist(self))\n self.deallocate(r)\n\n #starts allocation\n def 
allocate(self, request):\n r = request\n print(\"Starting allocation of \"+str(r.id))\n\n #starts deallocation\n def deallocate(self, request):\n r = request\n print(str(r.id)+\" is exiting the VPON\")\n\n#a request\nclass Request(object):\n def __init__(self, env, req_id, line_rate):\n self.env = env\n self.id = req_id\n self.line_rate = line_rate\n\n\n#main loop\nenv = simpy.Environment()\ndistribution = lambda x: random.expovariate(10)\nnum_onus = 10\no = ONU(env, 1, distribution, 614.4)\nenv.run()\n", "sub_path": "cfran.py", "file_name": "cfran.py", "file_ext": "py", "file_size_in_byte": 1666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "simpy.Store", "line_number": 13, "usage_type": "call"}, {"api_name": "simpy.Environment", "line_number": 60, "usage_type": "call"}, {"api_name": "random.expovariate", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "508850319", "text": "#!/usr/bin/env python3\n\nimport requests\n\nremote_url = \"http://192.168.1.182:5000/first_call?id=12345\"\npost_data = {'name':'Art'}\n\nr = requests.post(remote_url,post_data)\nprint(r.status_code, r.reason)\nprint(r.text)\n", "sub_path": "test_post.py", "file_name": "test_post.py", "file_ext": "py", "file_size_in_byte": 215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.post", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "50048916", "text": "# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyramid_fullauth/views/social.py\n# Compiled at: 2017-02-24 16:57:38\n__doc__ = 'Social login/registration view.'\nimport logging\nfrom pyramid.view import view_config\nfrom pyramid.httpexceptions import HTTPRedirection\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy.exc import IntegrityError\nfrom pyramid.compat import text_type\nimport pyramid_basemodel\nfrom pyramid_fullauth.views import BaseView\nfrom pyramid_fullauth.models import User\nfrom pyramid_fullauth.models import AuthenticationProvider\nfrom pyramid_fullauth.events import BeforeSocialRegister\nfrom pyramid_fullauth.events import AfterSocialRegister\nfrom pyramid_fullauth.events import AfterSocialLogIn\nfrom pyramid_fullauth.events import SocialAccountAlreadyConnected\nfrom pyramid_fullauth import tools\nlogger = logging.getLogger(__name__)\n\n@view_config(context='velruse.AuthenticationComplete', permission=NO_PERMISSION_REQUIRED, renderer='pyramid_fullauth:resources/templates/register.mako')\nclass SocialLoginViews(BaseView):\n \"\"\"Social login views definition.\"\"\"\n\n def set_provider(self, user, provider_name, user_provider_id):\n \"\"\"\n Set authentication provider on user.\n\n This method will connect given provider with given user,\n unless provider_id has already been used on another user.\n\n :param pyramid_fullauth.user.User user: user object\n :param str provider_name: provider name\n :param str user_provider_id: user id delivered by given provider\n\n :returns: whether user got/was connected or the connection was made with another user.\n :rtype: bool\n \"\"\"\n if user.id:\n try:\n provider_auth = pyramid_basemodel.Session.query(AuthenticationProvider).filter(AuthenticationProvider.user_id == user.id, AuthenticationProvider.provider == 
provider_name).one()\n if provider_auth.provider_id != user_provider_id:\n return False\n return True\n except NoResultFound:\n pass\n\n provider_auth = AuthenticationProvider(provider=provider_name, provider_id=user_provider_id)\n user.providers.append(provider_auth)\n pyramid_basemodel.Session.flush()\n return True\n\n def __call__(self):\n \"\"\"\n Action provides social authorization functionality.\n\n When authorization with facebook or twitter is done successfully\n action tries to find existing user in database,\n if it exists - login this user, otherwise creates new user.\n \"\"\"\n context = self.request.context\n response_values = {'status': False, \n 'csrf_token': self.request.session.get_csrf_token()}\n user = self.request.user\n if user:\n return self._connect_user(response_values)\n else:\n try:\n user = pyramid_basemodel.Session.query(User).join(AuthenticationProvider).filter(AuthenticationProvider.provider == context.provider_name, AuthenticationProvider.provider_id == context.profile['accounts'][0]['userid']).one()\n except NoResultFound:\n user = None\n\n if not user:\n user = self._register_user()\n try:\n self.request.registry.notify(AfterSocialRegister(self.request, user, context.profile, response_values))\n except HTTPRedirection as redirect:\n return redirect\n\n try:\n self.request.registry.notify(AfterSocialLogIn(self.request, user, context.profile))\n except HTTPRedirection as redirect:\n return self.request.login_perform(user, location=redirect.location)\n\n return self.request.login_perform(user)\n return\n\n def _connect_user(self, response_values):\n \"\"\"\n Connect user to social account.\n\n :param dict response_values:\n\n :returns: response values with any message,\n or HTTPRedirection if raised in SocialAccountAlreadyConnected\n :rtype: dict\n \"\"\"\n context = self.request.context\n user = self.request.user\n try:\n if not self.set_provider(user, context.provider_name, context.profile['accounts'][0]['userid']):\n response_values['msg'] = self.request._('Your account is already connected to other ${provider} account.', domain='pyramid_fullauth', mapping={'provider': context.provider_name})\n else:\n response_values['status'] = True\n response_values['msg'] = self.request._('Your account has been connected to ${provider} account.', domain='pyramid_fullauth', mapping={'provider': context.provider_name})\n except IntegrityError:\n response_values['msg'] = self.request._('This ${provider} account is already connected with other account.', domain='pyramid_fullauth', mapping={'provider': context.provider_name})\n try:\n self.request.registry.notify(SocialAccountAlreadyConnected(self.request, user, context.profile, response_values))\n except HTTPRedirection as redirect:\n return redirect\n\n return response_values\n\n def _register_user(self):\n \"\"\"Actually register new user in the system based on context values.\"\"\"\n context = self.request.context\n email = self._email_from_context(context)\n try:\n user = pyramid_basemodel.Session.query(User).filter(User.email == email).one()\n if not self.set_provider(user, context.provider_name, context.profile['accounts'][0]['userid']):\n logger.debug(('Authenticated {user.id} connected to\\n {context.provider_name} id {connected_id},\\n with {userid}').format(user=user, context=context, connected_id=user.provider_id(context.provider_name), userid=context.profile['accounts'][0]['userid']))\n pyramid_basemodel.Session.flush()\n except NoResultFound:\n length_min = self.config.register.password.length_min\n user = 
User(email=email, password=tools.password_generator(length_min), address_ip=self.request.remote_addr)\n self.request.registry.notify(BeforeSocialRegister(self.request, user, context.profile))\n self.set_provider(user, context.provider_name, context.profile['accounts'][0]['userid'])\n pyramid_basemodel.Session.add(user)\n pyramid_basemodel.Session.flush()\n user.is_active = True\n\n return user\n\n def _email_from_context(self, context):\n \"\"\"\n Extract or generate email from context values.\n\n :param velruse.AuthenticationComplete context: velruse context\n \"\"\"\n if 'verifiedEmail' in context.profile:\n return context.profile['verifiedEmail']\n else:\n if 'emails' in context.profile and context.profile['emails'] and 'value' in context.profile['emails'][0]:\n return context.profile['emails'][0]['value']\n return text_type(('{0}@{1}').format(context.profile['accounts'][0]['userid'], context.profile['accounts'][0]['domain']))", "sub_path": "pycfiles/pyramid_game_offers-0.2.13.tar/social.py", "file_name": "social.py", "file_ext": "py", "file_size_in_byte": 7411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "pyramid_fullauth.views.BaseView", "line_number": 26, "usage_type": "name"}, {"api_name": "pyramid_basemodel.Session.query", "line_number": 45, "usage_type": "call"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider", "line_number": 45, "usage_type": "argument"}, {"api_name": "pyramid_basemodel.Session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider.user_id", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider.provider", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 49, "usage_type": "name"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider", "line_number": 52, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session.flush", "line_number": 54, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider", "line_number": 73, "usage_type": "argument"}, {"api_name": "pyramid_basemodel.Session.query", "line_number": 73, "usage_type": "call"}, {"api_name": "pyramid_fullauth.models.User", "line_number": 73, "usage_type": "argument"}, {"api_name": "pyramid_basemodel.Session", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider.provider", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.AuthenticationProvider.provider_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 74, "usage_type": "name"}, {"api_name": "pyramid_fullauth.events.AfterSocialRegister", "line_number": 80, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPRedirection", "line_number": 81, "usage_type": "name"}, {"api_name": "pyramid_fullauth.events.AfterSocialLogIn", "line_number": 85, "usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPRedirection", "line_number": 86, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 110, "usage_type": "name"}, {"api_name": "pyramid_fullauth.events.SocialAccountAlreadyConnected", "line_number": 113, 
"usage_type": "call"}, {"api_name": "pyramid.httpexceptions.HTTPRedirection", "line_number": 114, "usage_type": "name"}, {"api_name": "pyramid_basemodel.Session.query", "line_number": 124, "usage_type": "call"}, {"api_name": "pyramid_fullauth.models.User", "line_number": 124, "usage_type": "argument"}, {"api_name": "pyramid_basemodel.Session", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pyramid_fullauth.models.User.email", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pyramid_basemodel.Session.flush", "line_number": 127, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session", "line_number": 127, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 128, "usage_type": "name"}, {"api_name": "pyramid_fullauth.models.User", "line_number": 130, "usage_type": "call"}, {"api_name": "pyramid_fullauth.tools.password_generator", "line_number": 130, "usage_type": "call"}, {"api_name": "pyramid_fullauth.tools", "line_number": 130, "usage_type": "name"}, {"api_name": "pyramid_fullauth.events.BeforeSocialRegister", "line_number": 131, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session.add", "line_number": 133, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pyramid_basemodel.Session.flush", "line_number": 134, "usage_type": "call"}, {"api_name": "pyramid_basemodel.Session", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pyramid.compat.text_type", "line_number": 150, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 25, "usage_type": "call"}, {"api_name": "pyramid.security.NO_PERMISSION_REQUIRED", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "441758459", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom theano.tensor.signal import pool\nfrom theano.tensor.nnet import conv2d\nimport theano.tensor as T\nimport numpy as np\nimport theano\n\nclass LeNetConvPoolLayer(object):\n\n def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2,2), name='Model', params=None):\n if params != None:\n self.W = theano.shared(params[str(name) + \"_W_in\"], name=str(name) + \"_W_in\", borrow=True)\n self.b = theano.shared(params[str(name) + \"_b_in\"], name=str(name) + \"_b_in\", borrow=True)\n else:\n assert image_shape[1] == filter_shape[1]\n self.input = input\n\n fan_in = np.prod(filter_shape[1:])\n fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) // np.prod(poolsize))\n W_bound = np.sqrt(6. 
/ (fan_in + fan_out))\n self.W = theano.shared(\n np.asarray(\n rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),\n dtype=np.float32\n ),\n name=str(name) + '_W_in',\n borrow=True\n )\n\n b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)\n self.b = theano.shared(\n value=b_values,\n name=str(name) + '_b_in',\n borrow=True)\n\n conv_out = conv2d(\n input=input,\n filters=self.W,\n filter_shape=filter_shape,\n input_shape=image_shape\n )\n\n pooled_out = pool.pool_2d(\n input=conv_out,\n ws=poolsize,\n ignore_border=True\n )\n\n self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n\n self.params = []\n self.params.extend([self.W, self.b])\n\n def get_params(self):\n params = {}\n for param in self.params:\n params[param.name] = param.get_value()\n return params\n", "sub_path": "deeplearning/src/theano/lenet_conv_pool_layer.py", "file_name": "lenet_conv_pool_layer.py", "file_ext": "py", "file_size_in_byte": 1952, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "theano.shared", "line_number": 14, "usage_type": "call"}, {"api_name": "theano.shared", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "theano.shared", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 33, "usage_type": "call"}, {"api_name": "theano.tensor.nnet.conv2d", "line_number": 38, "usage_type": "call"}, {"api_name": "theano.tensor.signal.pool.pool_2d", "line_number": 45, "usage_type": "call"}, {"api_name": "theano.tensor.signal.pool", "line_number": 45, "usage_type": "name"}, {"api_name": "theano.tensor.tanh", "line_number": 51, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "587576535", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import EmailMessage\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404\n\n# Create your views here.\nfrom django.shortcuts import render\nfrom django.contrib.auth import login\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django import forms\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import force_bytes\n\nfrom django.utils.encoding import force_str\n\n\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.views.generic import TemplateView, FormView, CreateView, ListView, DeleteView, UpdateView\nfrom django.contrib.auth import get_user_model\nfrom django_filters.views import FilterView\n\n# from info.filter import SkillFilter\nfrom info.token import account_activation_token\nfrom worker.models import WorkerProfile, WorkerSkill, ContractorProfile, HireWorker\nfrom info.permisionmixin import WorkerCheckMixin, ContractorCheckMixin\n\nUser = get_user_model()\n\nfrom .forms import UserRegistrationForm, LoginForm, ProfileForm, 
WorkerSkillForm, ContractorProfileForm, HireSkillForm\n\n\ndef home(request):\n return render(request, 'index.html')\n\ndef activate(request, uidb64, token):\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n login(request, user)\n # return redirect('home')\n return HttpResponseRedirect('/')\n else:\n return HttpResponse('Activation link is invalid!')\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n userObj = form.cleaned_data\n phone_number = userObj['phone_number']\n first_name = userObj['first_name']\n last_name = userObj['last_name']\n password = userObj['password']\n email = userObj['email']\n type = userObj['type']\n if not User.objects.filter(phone_number=phone_number).exists():\n user = User.objects.create_user(phone_number,first_name,last_name,email,type, password)\n # login(request, user)\n current_site = get_current_site(request)\n mail_subject = 'Activate your account.'\n message = render_to_string('acc_active_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n to_email = form.cleaned_data.get('email')\n email = EmailMessage(\n mail_subject, message, to=[to_email]\n )\n # email.send()\n user.is_active = True\n user.save()\n return HttpResponseRedirect('/')\n else:\n raise forms.ValidationError('Looks like a username with that email or password already exists')\n else:\n return render(request, 'registration.html', {'form': form})\n else:\n form = UserRegistrationForm()\n return render(request, 'registration.html', {'form' : form})\n\ndef loginuser(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n userObj = form.cleaned_data\n phone_number = userObj['phone_number']\n password = userObj['password']\n try:\n user = User.objects.get(phone_number = phone_number)\n except:\n form.add_error(None, 'User not found.')\n return render(request, 'login.html', {'form' : form})\n if not user.check_password(password):\n form.add_error(None, 'wrong password')\n return render(request, 'login.html', {'form' : form})\n if not user.is_active:\n form.add_error(None, 'You are not activated. 
Contact to admin.')\n return render(request, 'login.html', {'form' : form})\n login(request, user)\n return HttpResponseRedirect('/')\n else:\n form = LoginForm()\n return render(request, 'login.html', {'form' : form})\n\ndef aboutus(request):\n return render(request, 'aboutus.html')\n\ndef contactus(request):\n return render(request, 'contactus.html')\n\nclass SkillCreate(WorkerCheckMixin, FormView):\n\n template_name = 'Skill.html'\n form_class = WorkerSkillForm\n success_url = '/skill/'\n\n def post(self, request, *args, **kwargs):\n return super(SkillCreate, self).post(request,*args,**kwargs)\n\n def form_valid(self, form):\n rate_per_day = form.cleaned_data['rate_per_day']\n skill = form.cleaned_data.get('skill').capitalize()\n obj, created = WorkerSkill.objects.get_or_create(user=self.request.user,skill=skill,\n rate_per_day=rate_per_day)\n obj.experience = form.cleaned_data['experience']\n obj.save()\n return super(SkillCreate, self).form_valid(form)\n\nclass SkillUpdate(WorkerCheckMixin, UpdateView):\n template_name = 'Skill.html'\n form_class = WorkerSkillForm\n success_url = '/skill/'\n queryset = WorkerSkill.objects.all()\n\n def get(self, request, *args, **kwargs):\n return super(SkillUpdate, self).get(request, *args, **kwargs)\n\nclass SkillListView(WorkerCheckMixin, ListView):\n\n template_name = 'skilllist.html'\n queryset = WorkerSkill.objects.all().order_by('-created_at')\n paginate_by = 10\n\n\n def get_queryset(self):\n return super(SkillListView, self).get_queryset().filter(user=self.request.user)\n\nclass DeleteSkillView(WorkerCheckMixin, DeleteView):\n model = WorkerSkill\n template_name = 'confirm_delete.html'\n success_url = '/skill/'\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n if self.object.user == request.user:\n self.object.delete()\n return HttpResponseRedirect(self.get_success_url())\n else:\n raise Http404 #or return HttpResponse('404_url')\n\nclass GetProfile(TemplateView):\n\n template_name = 'worker_profile_show.html'\n\n def get_context_data(self, **kwargs):\n context = super(GetProfile, self).get_context_data(**kwargs)\n if self.request.user.is_contractor():\n if ContractorProfile.objects.filter(user=self.request.user).exists():\n context['profile'] = ContractorProfile.objects.get(user=self.request.user)\n return context\n if WorkerProfile.objects.filter(user=self.request.user).exists():\n context['profile'] = WorkerProfile.objects.get(user=self.request.user)\n return context\n\n\nclass CreateProfile(WorkerCheckMixin, FormView):\n form_class = ProfileForm\n template_name = 'worker_profile.html'\n success_url = '/'\n\n def get_form(self, form_class = ProfileForm):\n \"\"\"\n Check if the user already saved contact details. 
If so, then show\n the form populated with those details, to let user change them.\n \"\"\"\n if self.request.user.is_worker:\n try:\n contact = WorkerProfile.objects.get(user=self.request.user)\n return form_class(instance=contact, **self.get_form_kwargs())\n except WorkerProfile.DoesNotExist:\n return form_class(**self.get_form_kwargs())\n\n def post(self, request, *args, **kwargs):\n return super(CreateProfile, self).post(request, *args, **kwargs)\n\n def form_invalid(self, form):\n return super(CreateProfile, self).form_invalid(form)\n\n def form_valid(self, form):\n obj , created = WorkerProfile.objects.get_or_create(user=self.request.user)\n obj.city = form.cleaned_data.get('city')\n obj.district = form.cleaned_data.get('district')\n obj.state = form.cleaned_data.get('state')\n obj.address = form.cleaned_data.get('address')\n obj.picture = form.cleaned_data.get('picture')\n obj.about = form.cleaned_data.get('about')\n obj.save()\n return super(CreateProfile, self).form_valid(form)\n\n\nclass CreateContractorProfile(ContractorCheckMixin, FormView):\n form_class = ContractorProfileForm\n template_name = 'worker_profile.html'\n success_url = '/'\n\n def get_form(self, form_class = ContractorProfileForm):\n \"\"\"\n Check if the user already saved contact details. If so, then show\n the form populated with those details, to let user change them.\n \"\"\"\n if self.request.user.is_worker:\n try:\n contact = ContractorProfile.objects.get(user=self.request.user)\n return form_class(instance=contact, **self.get_form_kwargs())\n except ContractorProfile.DoesNotExist:\n return form_class(**self.get_form_kwargs())\n\n def post(self, request, *args, **kwargs):\n return super(CreateContractorProfile, self).post(request, *args, **kwargs)\n\n def form_invalid(self, form):\n return super(CreateContractorProfile, self).form_invalid(form)\n\n def form_valid(self, form):\n obj , created = ContractorProfile.objects.get_or_create(user=self.request.user)\n obj.city = form.cleaned_data.get('city')\n obj.district = form.cleaned_data.get('district')\n obj.state = form.cleaned_data.get('state')\n obj.address = form.cleaned_data.get('address')\n obj.picture = form.cleaned_data.get('picture')\n obj.about = form.cleaned_data.get('about')\n obj.licence = form.cleaned_data.get('licence')\n obj.save()\n return super(CreateContractorProfile, self).form_valid(form)\n\n\nclass WorkerSkillListView(ListView, FilterView):\n\n paginate_by = 10\n model = WorkerSkill\n queryset = WorkerSkill.objects.filter(status='N').order_by('-created_at')\n template_name = 'worker_skill_list_filter.html'\n # filterset_class = SkillFilter\n\n def get_queryset(self):\n queryset =super(WorkerSkillListView, self).get_queryset()\n remove_id = []\n for qs in queryset:\n if not qs.user.is_profile_compele():\n remove_id.append(qs.id)\n queryset = queryset.exclude(id__in= remove_id)\n skill = self.request.GET.get('skill')\n if skill:\n queryset = queryset.filter(skill__icontains = skill)\n city = self.request.GET.get('city')\n if city:\n queryset = queryset.filter(user__rel_worker_profile__city__icontains = city)\n name = self.request.GET.get('name')\n if name :\n queryset = queryset.filter(user__first_name__icontains = name)\n experience = self.request.GET.get('experience')\n if experience:\n queryset = queryset.filter(experience__gte = int(experience))\n\n return queryset\n\nclass HireWorkerView(FormView):\n\n template_name = 'hire_worker_profile.html'\n form_class = HireSkillForm\n success_url = '/hired-request/'\n\n def get_context_data(self, 
**kwargs):\n context = super(HireWorkerView, self).get_context_data(**kwargs)\n if self.request.method == 'GET':\n skill_id = self.request.resolver_match.kwargs['pk']\n WorkerSkill.objects.filter(id = skill_id)\n obj = WorkerSkill.objects.filter(id=skill_id).first()\n if WorkerProfile.objects.filter(user=obj.user).exists():\n context['profile'] = WorkerProfile.objects.get(user=obj.user)\n context['form'] = HireSkillForm\n context['id'] = self.request.resolver_match.kwargs.get('pk', None)\n return context\n\n\n def get(self, request, *args, **kwargs):\n return super(HireWorkerView, self).get(request, *args)\n\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n if not self.request.user.is_profile_compele():\n form.add_error(None, 'Error!! Complete your profile first.Until complete profile you cannot hire any worker')\n context = self.get_context_data(**kwargs)\n skill_id = self.request.resolver_match.kwargs['pk']\n WorkerSkill.objects.filter(id=skill_id)\n obj = WorkerSkill.objects.filter(id=skill_id).first()\n if WorkerProfile.objects.filter(user=obj.user).exists():\n context['profile'] = WorkerProfile.objects.get(user=obj.user)\n context['form'] = HireSkillForm\n context['id'] = self.request.resolver_match.kwargs.get('pk', None)\n context['form'] = form\n return self.render_to_response(context)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form, **kwargs)\n\n def form_valid(self, form):\n user = self.request.user\n workskill_id = self.request.resolver_match.kwargs.get('pk')\n from_date = form.cleaned_data['from_date']\n to_date = form.cleaned_data['to_date']\n # HireWorker.objects.filter(hired_by=request.user, skill_id=workskill_id, fr)\n ho = HireWorker.objects.get_or_create(hired_by=user, skill_id=workskill_id, from_date=from_date,\n to_date=to_date)\n return super(HireWorkerView, self).form_valid(form)\n\n def form_invalid(self, form, **kwargs):\n context = self.get_context_data(**kwargs)\n skill_id = self.request.resolver_match.kwargs['pk']\n WorkerSkill.objects.filter(id=skill_id)\n obj = WorkerSkill.objects.filter(id=skill_id).first()\n if WorkerProfile.objects.filter(user=obj.user).exists():\n context['profile'] = WorkerProfile.objects.get(user=obj.user)\n context['form'] = HireSkillForm\n context['id'] = self.request.resolver_match.kwargs.get('pk', None)\n context['form'] = form\n return self.render_to_response(context)\n\nclass HireRequestList(ListView):\n\n template_name = 'hire-request.html'\n queryset = HireWorker.objects.all()\n paginate_by = 10\n\n def get_queryset(self):\n if self.request.user.is_worker():\n return super(HireRequestList, self).get_queryset().filter(skill__user = self.request.user)\n return super(HireRequestList, self).get_queryset().filter(hired_by = self.request.user)\n\ndef confirm_hire_worker(request,pk):\n obj = get_object_or_404(HireWorker, pk=pk)\n obj.status = 'confirm'\n obj.save()\n return HttpResponseRedirect('/hired-request/')", "sub_path": "info/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 14815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_str", "line_number": 40, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_decode", "line_number": 40, "usage_type": 
"call"}, {"api_name": "info.token.account_activation_token.check_token", "line_number": 44, "usage_type": "call"}, {"api_name": "info.token.account_activation_token", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 49, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 51, "usage_type": "call"}, {"api_name": "forms.UserRegistrationForm", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 67, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 69, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_encode", "line_number": 72, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_bytes", "line_number": 72, "usage_type": "call"}, {"api_name": "info.token.account_activation_token.make_token", "line_number": 73, "usage_type": "call"}, {"api_name": "info.token.account_activation_token", "line_number": 73, "usage_type": "name"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 76, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 84, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 84, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "forms.UserRegistrationForm", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 89, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 108, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 109, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 110, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 116, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "info.permisionmixin.WorkerCheckMixin", "line_number": 121, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 121, "usage_type": "name"}, {"api_name": "forms.WorkerSkillForm", "line_number": 124, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.get_or_create", "line_number": 133, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 133, "usage_type": "name"}, {"api_name": "info.permisionmixin.WorkerCheckMixin", "line_number": 139, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 139, "usage_type": "name"}, {"api_name": "forms.WorkerSkillForm", "line_number": 141, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.all", "line_number": 143, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 143, 
"usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 143, "usage_type": "name"}, {"api_name": "info.permisionmixin.WorkerCheckMixin", "line_number": 148, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 148, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.all", "line_number": 151, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 151, "usage_type": "name"}, {"api_name": "info.permisionmixin.WorkerCheckMixin", "line_number": 158, "usage_type": "name"}, {"api_name": "django.views.generic.DeleteView", "line_number": 158, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill", "line_number": 159, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 167, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 169, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 171, "usage_type": "name"}, {"api_name": "worker.models.ContractorProfile.objects.filter", "line_number": 178, "usage_type": "call"}, {"api_name": "worker.models.ContractorProfile.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "worker.models.ContractorProfile", "line_number": 178, "usage_type": "name"}, {"api_name": "worker.models.ContractorProfile.objects.get", "line_number": 179, "usage_type": "call"}, {"api_name": "worker.models.ContractorProfile.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "worker.models.ContractorProfile", "line_number": 179, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.filter", "line_number": 181, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 181, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get", "line_number": 182, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 182, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 182, "usage_type": "name"}, {"api_name": "info.permisionmixin.WorkerCheckMixin", "line_number": 186, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 186, "usage_type": "name"}, {"api_name": "forms.ProfileForm", "line_number": 187, "usage_type": "name"}, {"api_name": "forms.ProfileForm", "line_number": 191, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get", "line_number": 198, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 198, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.DoesNotExist", "line_number": 200, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 200, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get_or_create", "line_number": 210, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 210, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 210, "usage_type": "name"}, {"api_name": "info.permisionmixin.ContractorCheckMixin", "line_number": 221, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", 
"line_number": 221, "usage_type": "name"}, {"api_name": "forms.ContractorProfileForm", "line_number": 222, "usage_type": "name"}, {"api_name": "forms.ContractorProfileForm", "line_number": 226, "usage_type": "name"}, {"api_name": "worker.models.ContractorProfile.objects.get", "line_number": 233, "usage_type": "call"}, {"api_name": "worker.models.ContractorProfile.objects", "line_number": 233, "usage_type": "attribute"}, {"api_name": "worker.models.ContractorProfile", "line_number": 233, "usage_type": "name"}, {"api_name": "worker.models.ContractorProfile.DoesNotExist", "line_number": 235, "usage_type": "attribute"}, {"api_name": "worker.models.ContractorProfile", "line_number": 235, "usage_type": "name"}, {"api_name": "worker.models.ContractorProfile.objects.get_or_create", "line_number": 245, "usage_type": "call"}, {"api_name": "worker.models.ContractorProfile.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "worker.models.ContractorProfile", "line_number": 245, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 257, "usage_type": "name"}, {"api_name": "django_filters.views.FilterView", "line_number": 257, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill", "line_number": 260, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 261, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 261, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 261, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 287, "usage_type": "name"}, {"api_name": "forms.HireSkillForm", "line_number": 290, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 297, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 297, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 297, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 298, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 298, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 298, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.filter", "line_number": 299, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 299, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 299, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get", "line_number": 300, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 300, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 300, "usage_type": "name"}, {"api_name": "forms.HireSkillForm", "line_number": 301, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 316, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 316, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 316, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 317, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 317, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 317, "usage_type": "name"}, {"api_name": 
"worker.models.WorkerProfile.objects.filter", "line_number": 318, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 318, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 318, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get", "line_number": 319, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 319, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 319, "usage_type": "name"}, {"api_name": "forms.HireSkillForm", "line_number": 320, "usage_type": "name"}, {"api_name": "worker.models.HireWorker.objects.get_or_create", "line_number": 335, "usage_type": "call"}, {"api_name": "worker.models.HireWorker.objects", "line_number": 335, "usage_type": "attribute"}, {"api_name": "worker.models.HireWorker", "line_number": 335, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 342, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 342, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 342, "usage_type": "name"}, {"api_name": "worker.models.WorkerSkill.objects.filter", "line_number": 343, "usage_type": "call"}, {"api_name": "worker.models.WorkerSkill.objects", "line_number": 343, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerSkill", "line_number": 343, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.filter", "line_number": 344, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 344, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 344, "usage_type": "name"}, {"api_name": "worker.models.WorkerProfile.objects.get", "line_number": 345, "usage_type": "call"}, {"api_name": "worker.models.WorkerProfile.objects", "line_number": 345, "usage_type": "attribute"}, {"api_name": "worker.models.WorkerProfile", "line_number": 345, "usage_type": "name"}, {"api_name": "forms.HireSkillForm", "line_number": 346, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 351, "usage_type": "name"}, {"api_name": "worker.models.HireWorker.objects.all", "line_number": 354, "usage_type": "call"}, {"api_name": "worker.models.HireWorker.objects", "line_number": 354, "usage_type": "attribute"}, {"api_name": "worker.models.HireWorker", "line_number": 354, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 363, "usage_type": "call"}, {"api_name": "worker.models.HireWorker", "line_number": 363, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 366, "usage_type": "call"}]} +{"seq_id": "88299691", "text": "# -*- coding:utf-8 -*-\nfrom __future__ import print_function\nimport glob\nimport os\nimport shutil\nfrom datetime import datetime\nimport csv\nimport unicodecsv\nimport json\nimport sqlite3\nfrom difflib import SequenceMatcher\nimport unicodecsv\n\n\"\"\"\nRead a specific columns of a csv\n\"\"\"\ndef csvreadercol(filename, idcol):\n # We handle the uvs csv\n rowdata = []\n with open(filename, 'rb') as csvfile:\n reader = unicodecsv.reader(csvfile, delimiter=',', quotechar='\"', encoding='utf-8')\n for row in reader:\n if row[idcol] != '':\n rowdata.append(row[idcol]) # yield ?\n return rowdata\n\n\n\"\"\"\nReturn the index of a csv (assume there is one)\n\"\"\"\ndef indexreader(filename): # if it exist !\n with 
open(filename, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n keys = next(reader)\n return keys\n\n\n\"\"\"\nCopy all the file/sqlitedb to a folder under /cache\n\"\"\"\ndef cacheDir():\n os.chdir(\".\")\n date = datetime.today()\n date = str(date.year) + \"-\" + str(date.month) + \"-\" + str(date.day) + \"-\" + str(date.hour) + \"h\" + str(date.minute)\n dirname = 'cache/'+date+\"/\"\n try:\n os.makedirs(dirname)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n for file in glob.glob(\"*.*\"):\n shutil.copy2(file, dirname)\n print(file)\n\n\n\n\n\n\"\"\"\nReturn the most similar school name, and the ratio of similarity\n\"\"\"\ndef seqmatch(name):\n ratioguess = 0\n for school in data:\n matchratio = SequenceMatcher(None, school['name'], name).ratio()\n if matchratio > ratioguess:\n schoolguess = school['name']\n ratioguess = matchratio\n return schoolguess, ratioguess\n \n\"\"\"\nCreate the csv schoolmatcher.csv\n\"\"\" \ndef initschoolsmatcher():\n json_data=open('schools.json')\n data = json.load(json_data)\n\n with open('schoolmatcher.csv', 'wb') as f:\n writer = unicodecsv.writer(f, quotechar='\"', encoding='utf-8')\n for row in branch:\n nameguess, ratioguess = seqmatch(row)\n writer.writerow([row, nameguess, ratioguess])\n\n\"\"\"\nTransform a csv into an sqlite3 db\nreturn the sql creation command\n\"\"\"\ndef csv2db(csvname, dbname):\n connection = sqlite3.connect(dbname)\n cursor = connection.cursor()\n\n # try...\n with open (csvname, 'r') as f:\n reader = unicodecsv.reader(f)\n columns = next(reader)\n csvname = csvname.split(\".\")[0] # to not take .csv #todo fix future bug...\n\n # we create the table\n createquery = 'CREATE TABLE ' + csvname + '({0});'\n createquery = createquery.format(','.join(columns))\n print(createquery)\n cursor.execute(createquery)\n\n # we insert data\n query = 'INSERT INTO '+ csvname +'({0}) values ({1})'\n query = query.format(','.join(columns), ','.join('?' * len(columns)))\n for data in reader:\n cursor.execute(query, data)\n connection.commit()\n cursor.close()\n\n\"\"\"\nTransform a json into an sqlite3 db\nreturn the sql creation command\n\"\"\"\ndef json2db(jsonname, dbname):\n jsonfile = json.load(open(jsonname))\n\n connection = sqlite3.connect(dbname)\n cursor = connection.cursor()\n\n columns = jsonfile[0].keys() # assume the first row as all the attributes\n jsonname = jsonname.split(\".\")[0]\n \n # we create the table\n createquery = 'CREATE TABLE ' + jsonname + '({0});'\n createquery = createquery.format(','.join(columns))\n print(createquery)\n cursor.execute(createquery) \n \n \n query = 'INSERT INTO '+ jsonname +'({0}) values ({1})'\n query = query.format(','.join(columns), ','.join('?' 
* len(columns)))\n for jsonobject in jsonfile[1:]:\n values = []\n for value in jsonobject.values():\n if isinstance(value, list):\n if len(value) == 1:\n values.append(value[0])\n else:\n values.append(str(value))\n else:\n values.append(value)\n cursor.execute(query, values) \n connection.commit()\n cursor.close()\n\n\n\"\"\"\nCreate the uttravel db from csv and json\nAssume that uttravel.db don't exist\n\"\"\"\ndef csvandjsontodb():\n csv2db('schoolmatcher.csv', 'uttravel.db')\n csv2db('branchmatcher.csv', 'uttravel.db')\n csv2db('uvs.csv', 'uttravel.db')\n csv2db('etudiants.csv', 'uttravel.db')\n json2db('schools.json', 'uttravel.db')\n json2db('schoolsV2.json', 'uttravel.db')\n json2db('docs.json', 'uttravel.db')\n\n\nif __name__ == '__main__':\n # cacheDir()\n csvandjsontodb()\n\n", "sub_path": "db/dbhandler.py", "file_name": "dbhandler.py", "file_ext": "py", "file_size_in_byte": 4630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unicodecsv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 33, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 51, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 52, "usage_type": "call"}, {"api_name": "difflib.SequenceMatcher", "line_number": 65, "usage_type": "call"}, {"api_name": "json.load", "line_number": 76, "usage_type": "call"}, {"api_name": "unicodecsv.writer", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "unicodecsv.reader", "line_number": 94, "usage_type": "call"}, {"api_name": "json.load", "line_number": 117, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "641074721", "text": "from django import template\r\nfrom django.urls import reverse\r\nfrom django.http import QueryDict\r\n\r\nregister = template.Library()\r\n\r\n\r\n@register.simple_tag()\r\ndef reverse_url(request, name, *args, **kwargs):\r\n \"\"\"\r\n 反向解析生成URL,拼接参数\r\n :return:\r\n \"\"\"\r\n # 根据传的url别名和参数反向解析生成基本的URL\r\n base_url = reverse(name, args=args, kwargs=kwargs)\r\n # 从当前的URL上获取参数 ——》 query=1&page=2\r\n params = request.GET.urlencode()\r\n # 如果没有条件 直接返回基本的URL\r\n if not params:\r\n return base_url\r\n # 返回带条件的URL\r\n return \"{}?{}\".format(base_url, params)\r\n\r\n\r\n@register.simple_tag()\r\ndef rev_url(request, name, *args, **kwargs):\r\n \"\"\"\r\n 反向解析生成URL,拼接参数\r\n :return:\r\n \"\"\"\r\n\r\n base_url = reverse(name, args=args, kwargs=kwargs)\r\n\r\n # 直接拿到当前的URL\r\n url = request.get_full_path()\r\n qd = QueryDict(mutable=True)\r\n qd['next'] = url\r\n return \"{}?{}\".format(base_url, qd.urlencode())\r\n", "sub_path": "crm/templatetags/url.py", "file_name": "url.py", "file_ext": "py", "file_size_in_byte": 1062, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.template.Library", "line_number": 5, "usage_type": "call"}, {"api_name": "django.template", 
"line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.QueryDict", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "475359144", "text": "import twitter\n\ndef get_authentication(screen_name=None,param=1):\n if screen_name==\"X\" and param==1:\n con_key='X'\n con_sec='X'\n acc_tok='X'\n acc_sec='X'\n \n api=twitter.Api(consumer_key=con_key,consumer_secret=con_sec,access_token_key=acc_tok,access_token_secret=acc_sec)\n return api\n\n\n\n \n \n\n\n", "sub_path": "Auth.py", "file_name": "Auth.py", "file_ext": "py", "file_size_in_byte": 338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "twitter.Api", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "309948575", "text": "import pygame as pg\r\nimport random\r\nfrom os import path\r\nfrom settings import *\r\nfrom sprites import *\r\n\r\nclass Game:\r\n def __init__(self):\r\n # initialize game window, etc\r\n pg.init()\r\n pg.mixer.init()\r\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\r\n pg.display.set_caption(TITLE)\r\n self.clock = pg.time.Clock()\r\n self.load_data()\r\n self.running = True\r\n self.score = 0\r\n self.font_name = pg.font.match_font('arial')\r\n \r\n def draw_text(self, text, size, color, x, y, align=\"topleft\"):\r\n font = pg.font.Font(self.font_name, size)\r\n text_surface = font.render(text, True, color)\r\n text_rect = text_surface.get_rect()\r\n text_rect.center = (x, y)\r\n self.screen.blit(text_surface, text_rect)\r\n \r\n def load_data(self):\r\n game_folder = path.dirname(__file__)\r\n img_folder = path.join(game_folder, 'img')\r\n self.player_img = pg.image.load(path.join(img_folder, PLAYER_IMG)).convert_alpha()\r\n self.gold_img = pg.image.load(path.join(img_folder, GOLD_IMG)).convert_alpha()\r\n self.laser_img = pg.image.load(path.join(img_folder, LASER_IMG)).convert_alpha()\r\n self.meteor_images = []\r\n for i in range(1, 11):\r\n self.meteor_images.append(pg.image.load(path.join(img_folder, 'meteor{}.png'.format(i))).convert())\r\n self.player_img_d = pg.transform.scale(self.player_img, (15, 15))\r\n self.player_img_u = pg.transform.flip(self.player_img_d, False, True)\r\n self.player_img_r = pg.transform.rotate(self.player_img_d, 90)\r\n self.player_img_l = pg.transform.flip(self.player_img_r, True, False)\r\n self.laser_img_d = pg.transform.scale(self.laser_img, (3, 12))\r\n self.laser_img_u = pg.transform.flip(self.laser_img_d, False, True)\r\n self.laser_img_r = pg.transform.rotate(self.laser_img_d, 90)\r\n self.laser_img_l = pg.transform.flip(self.laser_img_r, True, False)\r\n self.gold_img = pg.transform.scale(self.gold_img, (15, 15))\r\n \r\n \r\n def new(self):\r\n # start a new game\r\n self.all_sprites = pg.sprite.Group()\r\n self.mobs = pg.sprite.Group()\r\n self.golds = pg.sprite.Group()\r\n self.portals = pg.sprite.Group()\r\n self.lasers = pg.sprite.Group()\r\n self.player = Player(self)\r\n self.gold = Gold(self)\r\n self.portal = Portal(self, 100, 100, 15, 15)\r\n self.portal2 = Portal(self, 300, 450, 15, 15)\r\n self.laser = Powerup(self, \"laser\")\r\n self.all_sprites.add(self.portal)\r\n self.portals.add(self.portal)\r\n self.all_sprites.add(self.portal2)\r\n self.all_sprites.add(self.player)\r\n self.all_sprites.add(self.gold)\r\n self.all_sprites.add(self.laser)\r\n self.golds.add(self.gold)\r\n self.num_of_mobs = 45\r\n if 
self.score >= 3:\r\n self.num_of_mobs = 55\r\n if self.score >= 9:\r\n self.num_of_mobs = 75\r\n for i in range(self.num_of_mobs):\r\n self.mob = Mob(self)\r\n self.mobs.add(self.mob)\r\n self.all_sprites.add(self.mob)\r\n self.draw_debug = False\r\n self.run()\r\n\r\n def run(self):\r\n # Game Loop\r\n self.playing = True\r\n while self.playing:\r\n self.clock.tick(FPS)\r\n self.events()\r\n self.update()\r\n self.draw()\r\n\r\n def update(self):\r\n # Game Loop - Update\r\n self.all_sprites.update()\r\n # mobs hit player\r\n hits = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)\r\n for hit in hits:\r\n self.score -= 1\r\n self.player.kill()\r\n self.new()\r\n # player hits gold\r\n hits = pg.sprite.collide_rect(self.player, self.gold)\r\n if hits:\r\n self.score += 1\r\n self.new()\r\n if self.score < 0:\r\n self.score = 0\r\n\r\n # player hits portal\r\n hits = pg.sprite.spritecollide(self.player, self.portals, False)\r\n for hit in hits:\r\n self.player.rect.center = self.portal2.rect.center\r\n self.player.pos = self.portal2.rect.center\r\n\r\n hits = pg.sprite.collide_rect(self.player, self.laser)\r\n if hits:\r\n self.laser.kill()\r\n self.player.laser += 1\r\n\r\n hits = pg.sprite.groupcollide(self.lasers, self.mobs, True, True)\r\n\r\n def events(self):\r\n # Game Loop - events\r\n for event in pg.event.get():\r\n # check for closing window\r\n if event.type == pg.QUIT:\r\n if self.playing:\r\n self.playing = False\r\n self.running = False\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_h:\r\n self.draw_debug = not self.draw_debug\r\n\r\n def draw(self):\r\n # Game Loop - draw\r\n pg.display.set_caption(\"{:.2f}\".format(self.clock.get_fps()))\r\n self.screen.fill(BLACK)\r\n self.all_sprites.draw(self.screen)\r\n self.draw_text(str(self.score), 18, WHITE, WIDTH / 2, 10)\r\n for sprite in self.all_sprites:\r\n if self.draw_debug:\r\n pg.draw.rect(self.screen, CYAN, (sprite.rect), 1)\r\n \r\n # *after* drawing everything, flip the display\r\n pg.display.flip()\r\n\r\n def show_start_screen(self):\r\n # game splash/start screen\r\n self.screen.fill(BLACK)\r\n self.draw_text(TITLE, 48, YELLOW, WIDTH / 2, HEIGHT / 4)\r\n self.show_screen = 'main'\r\n self.buttons()\r\n pg.display.flip()\r\n self.wait_for_press()\r\n\r\n def show_in_screen(self):\r\n # game instructions\r\n self.screen.fill(BLACK)\r\n self.draw_text(\"Instructions\", 48, WHITE, WIDTH / 2, HEIGHT / 4)\r\n self.show_screen = 'instruction'\r\n self.chalbutton.rect.center = WIDTH / 4, HEIGHT * 7 / 8\r\n self.advebutton.rect.center = WIDTH * 3 / 4, HEIGHT * 7 / 8\r\n self.instbutton.kill()\r\n pg.display.flip()\r\n self.buttons.update()\r\n self.wait_for_press()\r\n\r\n def show_adve_screen(self):\r\n self.screen.fill(BLACK)\r\n self.show_screen = 'adventure'\r\n self.chalbutton.kill()\r\n self.advebutton.kill()\r\n self.instbutton.kill()\r\n self.level1button = Button(self, 80, HEIGHT / 2 - 40, 50, 50, 'level1')\r\n self.buttons.add(self.level1button)\r\n self.level1button.adve_texts()\r\n pg.display.flip()\r\n self.buttons.update()\r\n self.wait_for_press()\r\n \r\n def show_go_screen(self):\r\n # game over/continue\r\n pass\r\n\r\n def buttons(self):\r\n self.buttons = pg.sprite.Group()\r\n self.chalbutton = Button(self, WIDTH / 2, HEIGHT / 2, 115, 30, 'Challenge')\r\n self.advebutton = Button(self, WIDTH / 2, HEIGHT / 2 + 40, 115, 30, 'Adventure')\r\n self.instbutton = Button(self, WIDTH / 2, HEIGHT / 2 + 80, 115, 30, 'Instructions')\r\n self.buttons.add(self.chalbutton)\r\n 
self.buttons.add(self.advebutton)\r\n self.buttons.add(self.instbutton)\r\n self.buttons.draw(self.screen)\r\n self.chalbutton.main_texts()\r\n \r\n def wait_for_press(self):\r\n self.waiting = True\r\n while self.waiting:\r\n pg.display.set_caption(\"{:.2f}\".format(self.clock.get_fps()))\r\n pg.display.flip()\r\n self.clock.tick(FPS)\r\n for self.event in pg.event.get():\r\n if self.event.type == pg.QUIT:\r\n self.waiting = False\r\n self.running = False\r\n self.buttons.update()\r\n \r\ng = Game()\r\ng.show_start_screen()\r\nwhile g.running:\r\n g.show_go_screen()\r\n\r\npg.quit()\r\n", "sub_path": "Pygame/Gold/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pygame.init", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.font.match_font", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "name"}, {"api_name": "pygame.transform.scale", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 37, "usage_type": 
"attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_rect", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.K_h", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 137, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.display", 
"line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 181, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 197, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "299747074", "text": "import argparse\nimport os\nimport pdb\nimport sys\n\nimport gin\nimport torch\nfrom loader.metadata import ImagenetMetadata\nfrom loop2 import loop\nfrom nn.sampler2 import Sampler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import utils\nfrom utils.utils import MyDataParallel, prepare_dir, set_random_seed\n\n\nIMAGENET_DIR = '/st1/dataset/learn2sample/imagenet_l2s_84_84'\nDEVKIT_DIR = '/v9/whshin/imagenet/ILSVRC2012_devkit_t12'\nC = utils.getCudaManager('default')\n\nparser = argparse.ArgumentParser(description='Learning to sample')\nparser.add_argument('--cpu', action='store_true', help='disable CUDA')\nparser.add_argument('--volatile', action='store_true', help='no saved files.')\nparser.add_argument('--gin', type=str, default='test',\n help='gin filename to load configuration.')\nparser.add_argument('--parallel', action='store_true',\n help='use torh.nn.DataParallel')\nparser.add_argument('--visible_devices', nargs='+', type=int, default=None,\n help='for the environment variable: CUDA_VISIBLE_DEVICES')\nparser.add_argument('--seed', type=int, default=1, help='set random seed.')\n\n\n@gin.configurable\ndef meta_train(train_loop, valid_loop, test_loop, meta_epoch, tolerance,\n save_path, outer_optim, outer_lr):\n best_acc = 0\n no_improvement = 0\n\n # [ImageNet 1K] meta-train:100 / meta-valid:450 / meta-test:450 (classes)\n meta_data = ImagenetMetadata.load_or_make(\n data_dir=IMAGENET_DIR, devkit_dir=DEVKIT_DIR, remake=False)\n meta_train, meta_valid, meta_test = meta_data.split_classes((2, 4, 4))\n sampler = C(Sampler())\n # sampler.cuda_parallel_(dict(encoder=0, mask_gen=1), C.parallel)\n # sampler = MyDataParallel(sampler)\n # sampler.mask_gen = MyDataParallel(sampler.mask_gen)\n # sampler.mask_gen.data_parallel_recursive_()\n\n #####################################################################\n is_RL = False\n if not is_RL:\n outer_optim = {'sgd': 'SGD', 'adam': 'Adam'}[outer_optim.lower()]\n outer_optim = getattr(torch.optim, outer_optim)(\n sampler.parameters(), lr=outer_lr)\n #####################################################################\n\n if save_path:\n writer = SummaryWriter(os.path.join(save_path, 'tfevent'))\n\n for i in range(1, 
meta_epoch + 1):\n #####################################################################\n # meta train\n sampler, result_train = train_loop(\n data=meta_train,\n sampler=sampler,\n outer_optim=outer_optim,\n save_path=save_path,\n epoch=i,\n is_RL=is_RL\n )\n\n # meta valid\n _, result_valid = valid_loop(\n data=meta_valid,\n sampler=sampler,\n save_path=save_path,\n epoch=i,\n is_RL=is_RL\n )\n #####################################################################\n\n loss = result_valid.get_best_loss().mean()\n acc = result_valid.get_best_acc().mean()\n if save_path:\n # tensorboard\n writer.add_scalars('Loss/valid', {n: loss[n] for n in loss.index}, i)\n writer.add_scalars('Acc/valid', {n: acc[n] for n in acc.index}, i)\n # save numbers\n result_train.save_csv(f'records/train_{i}', save_path)\n result_valid.save_csv(f'records/valid_{i}', save_path)\n # update the best model\n if acc['ours'] > best_acc:\n if save_path:\n # TODO: find better way\n # why recursion error occurs if model is on GPU?\n sampler.cpu().save(save_path)\n sampler.cuda_parallel_(dict(encoder=0, mask_gen=1), C.parallel)\n else:\n best_sampler = sampler\n best_acc = acc['ours']\n print(f'Best accuracy update: {best_acc*100:6.2f}%')\n else:\n no_improvement += 1\n if no_improvement > tolerance:\n print(f'No improvments for {no_improvement} steps. Early-stopped.')\n break\n else:\n print(f'Early stop counter: {no_improvement}/{tolerance}.')\n\n if save_path:\n sampler = Sampler.load(save_path)\n sampler.cuda_parallel_(dict(encoder=0, mask_gen=1), C.parallel)\n else:\n sampler = best_sampler\n\n # meta test\n _, result_test = test_loop(\n data=meta_test,\n sampler=sampler,\n save_path=save_path)\n\n if save_path:\n result_test.save_csv('records/test', save_path)\n result_test.save_final_lineplot('loss_q_m', save_path)\n result_test.save_final_lineplot('acc_q_m', save_path)\n\n acc = result_test.get_best_acc()\n loss = result_test.get_best_loss()\n\n print(f'\\nFinal result:\\n')\n acc.save_mean_std('[Accuracy]', save_path)\n loss.save_mean_std('[Loss]', save_path)\n print('\\nend')\n\n\nif __name__ == '__main__':\n print('Start_of_program.')\n args = parser.parse_args()\n # set_random_seed(args.seed)\n C.set_cuda(not args.cpu and torch.cuda.is_available())\n if args.parallel:\n C.set_visible_devices(args.visible_devices)\n C.set_parallel()\n # gin\n gin_path = os.path.join('gin', args.gin + '.gin')\n gin.parse_config_file(gin_path)\n # prepare dir\n save_path = prepare_dir(gin_path) if not args.volatile else None\n # main loop\n meta_train(save_path=save_path)\n print('End_of_program.')\n", "sub_path": "train2.py", "file_name": "train2.py", "file_ext": "py", "file_size_in_byte": 5081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "utils.utils.getCudaManager", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 18, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "loader.metadata.ImagenetMetadata.load_or_make", "line_number": 39, "usage_type": "call"}, {"api_name": "loader.metadata.ImagenetMetadata", "line_number": 39, "usage_type": "name"}, {"api_name": "nn.sampler2.Sampler", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "nn.sampler2.Sampler.load", "line_number": 110, "usage_type": "call"}, {"api_name": "nn.sampler2.Sampler", "line_number": 110, "usage_type": "name"}, {"api_name": "gin.configurable", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "gin.parse_config_file", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.utils.prepare_dir", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "46131355", "text": "import subprocess\nfrom enum import Enum\nfrom typing import List\n\nimport utilities.log as log\nfrom utilities.exceptions_handle import handle_called_process_error\n\n\nclass DockerArgumentType(Enum):\n SPLIT = 1\n NO_SPLIT = 2\n\n\nclass DockerArgument:\n def __init__(self, type: DockerArgumentType, value: str):\n self.type = type\n self.value = value\n\n type: DockerArgumentType\n value: str\n\n\ndef recreate_dev_volumes(webapi_project_path, nginx_certs_path, dotnet_configuration):\n dev_rozklad_app_volume_name = 'rozklad-app-dev-secrets'\n dev_nginx_certs_volume_name = 'rozklad-nginx-dev-certs'\n\n log.info(\"Recreating and populating volumes...\")\n\n log.info(\"Removing existed volumes\")\n call_docker([\n DockerArgument(\n value='volume rm rozklad-app-dev-secrets rozklad-nginx-dev-certs',\n type=DockerArgumentType.SPLIT\n )\n ], capture_output=True)\n\n log.info(\"Populating rozklad-app volume...\")\n try:\n call_docker([\n DockerArgument(\n value=f'run --rm -v {dev_rozklad_app_volume_name}:/volume '\n f'-v {webapi_project_path}/Properties:/properties '\n f'ubuntu cp /properties/secret.{dotnet_configuration}.json /volume',\n type=DockerArgumentType.SPLIT\n ),\n ], capture_output=True, check=True)\n except subprocess.CalledProcessError as error:\n handle_called_process_error(error, 'Error when populating rozklad volume')\n\n log.info(\"Populating rozklad-nginx volume...\")\n try:\n call_docker([\n DockerArgument(\n value=f'run --rm -v {dev_nginx_certs_volume_name}:/volume '\n f'-v {nginx_certs_path}/Development:/certs '\n f'ubuntu cp /certs/cert.pem /certs/key.pem /volume',\n type=DockerArgumentType.SPLIT\n )\n ], capture_output=True, check=True)\n except subprocess.CalledProcessError as error:\n handle_called_process_error(error, 'Error when populating nginx volume')\n\n\ndef get_args_list_from_docker_arguments(docker_arguments: List[DockerArgument]):\n result = []\n for docker_argument in docker_arguments:\n result.extend(extract_docker_argument(docker_argument))\n return result\n\n\ndef extract_docker_argument(argument: DockerArgument):\n if argument.type == DockerArgumentType.SPLIT:\n return argument.value.split(' ')\n if argument.type == DockerArgumentType.NO_SPLIT:\n return [argument.value]\n raise NotImplementedError\n\n\ndef call_docker_compose(docker_arguments: List[DockerArgument], **subprocess_kargs):\n result_command = [\"docker-compose\"]\n result_command.extend(get_args_list_from_docker_arguments(docker_arguments))\n return subprocess.run(result_command, **subprocess_kargs)\n\n\ndef call_docker(docker_arguments: List[DockerArgument], **subprocess_kargs):\n result_command = [\"docker\"]\n 
result_command.extend(get_args_list_from_docker_arguments(docker_arguments))\n return subprocess.run(result_command, **subprocess_kargs)\n", "sub_path": "docker-build/build-script-source/utilities/docker_wrapper.py", "file_name": "docker_wrapper.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "enum.Enum", "line_number": 9, "usage_type": "name"}, {"api_name": "utilities.log.info", "line_number": 27, "usage_type": "call"}, {"api_name": "utilities.log", "line_number": 27, "usage_type": "name"}, {"api_name": "utilities.log.info", "line_number": 29, "usage_type": "call"}, {"api_name": "utilities.log", "line_number": 29, "usage_type": "name"}, {"api_name": "utilities.log.info", "line_number": 37, "usage_type": "call"}, {"api_name": "utilities.log", "line_number": 37, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utilities.exceptions_handle.handle_called_process_error", "line_number": 48, "usage_type": "call"}, {"api_name": "utilities.log.info", "line_number": 50, "usage_type": "call"}, {"api_name": "utilities.log", "line_number": 50, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 60, "usage_type": "attribute"}, {"api_name": "utilities.exceptions_handle.handle_called_process_error", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 82, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "430217691", "text": "# -----------------------------------------------------------------------------------------\n# Code taken from https://github.com/davidsandberg/facenet with modifications\n# -----------------------------------------------------------------------------------------\n\n# coding=utf-8\n\"\"\"Performs face detection in realtime.\n\nBased on code from https://github.com/shanren7/real_time_face_recognition\n\"\"\"\n# MIT License\n#\n# Copyright (c) 2017 François Gervais\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport argparse\nimport sys\nimport time\nimport mysql.connector as my\nimport cv2\n\nimport packages.face as face\n\n'''\nfunction write_db\n function for writing mysql database from python\n version 0.0\n mydb = mysql database connection\n sql = query function\n multival:\n True = write multiple rows\n False = write single row\n val= query values:\n multival = True => val => Lists\n multival = False => val => Tuples\n'''\ndef write_db(mydb, sql, val, multival):\n mycursor = mydb.cursor()\n if not multival:\n mycursor.execute(sql, val)\n else:\n mycursor.executemany(sql, val)\n mydb.commit()\n\n'''\nfunction read_db\n function for reading mysql database from python\n version 0.0\n mydb = mysql database connection\n sql = query function\n val= query values:\n'''\ndef read_db(mydb, sql, val):\n mycursor = mydb.cursor()\n mycursor.execute(sql, val)\n return mycursor.fetchall()\n\ndef add_overlays(frame, faces, frame_rate):\n if faces is not None:\n for face in faces:\n face_bb = face.bounding_box.astype(int)\n cv2.rectangle(frame,\n (face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),\n (0, 255, 0), 2)\n if face.name is not None:\n cv2.putText(frame, face.name, (face_bb[0], face_bb[3]),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),\n thickness=2, lineType=2)\n\n cv2.putText(frame, str(frame_rate) + \" fps\", (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),\n thickness=2, lineType=2)\n\ndef main(args):\n frame_interval = 3 # Number of frames after which to run face detection\n fps_display_interval = 5 # seconds\n frame_rate = 0\n frame_count = 0\n\n video_capture = cv2.VideoCapture(0)\n face_recognition = face.Recognition()\n start_date = time.strftime(\"%Y-%m-%d\",time.localtime())\n start_time = time.time()\n\n if args.debug:\n print(\"Debug enabled\")\n face.debug = True\n\n # connect to class database\n mydb = my.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"sac\"\n )\n\n # add column for today's class \n sql = \"select column_name from information_schema.columns where table_name = 'RPL_att' and column_name = %s\"\n val = (start_date,)\n x = read_db(mydb, sql, val)\n if not x:\n sql = \"alter table RPL_att add `\" + start_date + \"` time\"\n val = ()\n write_db(mydb, sql, val, False)\n \n # read class attendance\n sql = \"select Name, `2018-12-27` from RPL_att\"\n val = ()\n read_att = read_db(mydb, sql, val)\n\n class_att = {}\n for i in range(len(read_att)):\n class_att[read_att[i][0]]=read_att[i][1]\n\n play = True\n while play:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n\n if (frame_count % frame_interval) == 0:\n faces = face_recognition.identify(frame, 0.5)\n \n # Check our current fps\n end_time = time.time()\n if (end_time - start_time) > fps_display_interval:\n frame_rate = int(frame_count / (end_time - start_time))\n start_time = time.time()\n frame_count = 0\n \n add_overlays(frame, faces, frame_rate)\n\n frame_count += 1\n cv2.imshow('Attendance', frame)\n cv2.moveWindow('Attendance', 405, 180)\n\n # Update attendance\n for fc in faces:\n if fc.name is not \"Unknown\":\n class_att[fc.name] = time.strftime(\"%H:%M:%S\",time.localtime())\n '''\n # realtime db update\n sql = \"update RPL_att set `\" + start_date + \"` = %s where Name = %s\" \n val = 
(time.strftime(\"%H:%M:%S\",time.localtime()), fc.name)\n write_db(mydb, sql, val)\n '''\n \n if cv2.waitKey(100) & 0xFF == ord('q'):\n break\n play = cv2.getWindowProperty('Attendance', 0) >= 0 # check if 'Attendance' window is closed\n\n # When everything is done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()\n\n # update database\n write_att = []\n for key in class_att.keys():\n write_att.append((class_att[key],key))\n \n sql = \"update RPL_att set `\" + start_date + \"` = %s where Name = %s\" \n write_db(mydb, sql, write_att, True)\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--debug', action='store_true',\n help='Enable some debug outputs.')\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n", "sub_path": "real_time_face_recognition.py", "file_name": "real_time_face_recognition.py", "file_ext": "py", "file_size_in_byte": 6236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "packages.face", "line_number": 76, "usage_type": "name"}, {"api_name": "packages.face.bounding_box.astype", "line_number": 77, "usage_type": "call"}, {"api_name": "packages.face.bounding_box", "line_number": 77, "usage_type": "attribute"}, {"api_name": "packages.face", "line_number": 77, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 78, "usage_type": "call"}, {"api_name": "packages.face.name", "line_number": 81, "usage_type": "attribute"}, {"api_name": "packages.face", "line_number": 81, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 82, "usage_type": "call"}, {"api_name": "packages.face.name", "line_number": 82, "usage_type": "attribute"}, {"api_name": "packages.face", "line_number": 82, "usage_type": "name"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 96, "usage_type": "call"}, {"api_name": "packages.face.Recognition", "line_number": 97, "usage_type": "call"}, {"api_name": "packages.face", "line_number": 97, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "packages.face.debug", "line_number": 103, "usage_type": "attribute"}, {"api_name": "packages.face", "line_number": 103, "usage_type": "name"}, {"api_name": "mysql.connector.connect", "line_number": 106, "usage_type": "call"}, {"api_name": "mysql.connector", "line_number": 106, "usage_type": "name"}, {"api_name": "time.time", "line_number": 140, "usage_type": "call"}, {"api_name": "time.time", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 150, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 155, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.getWindowProperty", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 169, "usage_type": "call"}, {"api_name": 
"argparse.ArgumentParser", "line_number": 180, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 188, "usage_type": "attribute"}]} +{"seq_id": "551369872", "text": "import argparse\nimport sys\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nimport requests\nimport os\nimport pandas as pd\nimport numpy as np\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-u\", \"--uid\",\n help=\"this uid of admin0 in the destination DHIS2 system where you are importing org units from.\")\nparser.add_argument(\"-n\", \"--name\", help='this name of the admin0 in geoconnect you want to pull in org units from')\nparser.add_argument(\"-a\", \"--attribute\", help='this is the geoconnect attribute uid in DHIS2')\nargs = parser.parse_args()\n\nif len(sys.argv) < 3:\n parser.print_help()\n sys.exit(1)\n\nADMIN0_UID = str(args.uid)\nADMIN0_NAME = str(args.name)\nATTRIBUTE_UID = str(args.attribute)\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\nAPI_KEY = os.environ.get(\"API_KEY\")\n\n# Get data from Geoconect API\n\ns = requests.session()\ncounter = 1\nresults = []\nwhile True:\n page = s.get(\n \"https://www.geoconnect.org/api/countries?api_key=\" + API_KEY + \"&admin0=\" + ADMIN0_NAME + \"&page=\" + str(\n counter))\n\n df = pd.read_json(page.content.decode('utf-8'))\n\n if len(df.index) == 0:\n break\n\n results.append(df)\n counter = counter + 1\n\norgunits = pd.concat(results)\n\n\n# Build out the org heirarchy for DHIS2\norg_counter = 1\nexport = []\nregions = np.unique(orgunits['admin1'].values)\nfor region in regions:\n\n region_uid = \"ORG\" + '%08d' % org_counter\n parent_id = {\"id\": ADMIN0_UID}\n\n row = {\"name\": region,\n \"shortName\": region,\n \"openingDate\": \"1970-01-01T00:00:00.000\",\n \"id\": region_uid,\n \"parent\": parent_id}\n export.append(row)\n org_counter = org_counter + 1\n\n admin1_df = orgunits[orgunits['admin1'] == region]\n admin1_df = admin1_df.loc[admin1_df['admin2'].notnull()]\n districts = np.unique(admin1_df[['admin2']].values)\n\n # loop through each of the admin2s for each admin1\n\n for district in districts:\n\n district_uid = \"ORG\" + '%08d' % org_counter\n parent_id = {\"id\": region_uid}\n row = {\"name\": district,\n \"shortName\": district,\n \"openingDate\" : \"1970-01-01T00:00:00.000\",\n \"id\": district_uid,\n \"parent\": parent_id}\n export.append(row)\n org_counter = org_counter + 1\n\n # loop through each of the admin3s for each admin2\n admin2_df = orgunits[orgunits['admin2'] == district]\n\n admin2_df = admin2_df.loc[admin2_df['admin3'].notnull()]\n subdistricts = np.unique(admin2_df[['admin3']].values)\n\n for index, row in admin2_df.iterrows():\n\n attr_value = {\"id\": ATTRIBUTE_UID}\n attribute_data = {\"value\": row['geoconnect_id'], \"attribute\": attr_value}\n attributeValues = [attribute_data]\n\n subdistrict_uid = \"ORG\" + '%08d' % org_counter\n\n parent_id = {\"id\": district_uid}\n\n row = {\"name\": row['admin3'],\n \"shortName\": row['admin3'],\n \"openingDate\": \"1970-01-01T00:00:00.000\",\n \"id\": subdistrict_uid,\n \"parent\": parent_id,\n \"attributeValues\": attributeValues}\n\n export.append(row)\n org_counter = org_counter + 1\n\n\norgunits = {\"organisationUnits\": export}\njstr = json.dumps(orgunits, ensure_ascii=False, indent=4)\nprint(jstr)", "sub_path": "geoconnect/import_orgs.py", "file_name": "import_orgs.py", "file_ext": "py", "file_size_in_byte": 3415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.session", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "340486360", "text": "import numpy as np\nn,v0,vn = 30,0.5,2.3\nA = np.matrix(np.tril(np.ones((n,n))))\nb = np.matrix([[vn-v0]]*n)\nu = np.linalg.inv(A.T*A)*A.T*b\nv = [v0 + np.sum(u[:i]) for i in range(0,n+1)]\n\nimport matplotlib.pyplot as plt\nplt.plot(v, 'bs-', label='v(t)')\nplt.plot(u, 'gs-', label='u(t)')\nplt.legend(frameon=False)\nplt.show()\n", "sub_path": "src/ch6/1-lqr/lqr1.py", "file_name": "lqr1.py", "file_ext": "py", "file_size_in_byte": 320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.matrix", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.tril", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "432301525", "text": "import pandas as pd\nimport re\nimport codecs\nimport sys\nimport nltk\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom nltk.corpus import stopwords\nimport string\n\n# Function to remove Stopwords\ndef remove_stopwords(line):\n \n stop_words = set(stopwords.words('english'))\n line = [w for w in line if not w in stop_words]\n return line\n\n#Function to remove Punctuation and normalize characters to lowercase\ndef remove_punc_lower(line):\n line=\"\".join([char for char in line if char not in string.punctuation])\n line=line.lower()\n return line\n\n# Function to Tokenize words\ndef tokenize(line):\n tokens = re.split('\\W+', line)\n return 
tokens\n\n\nif __name__ == \"__main__\":\n input_path = 'pos.txt'\n f=open(input_path)\n pos=f.read().split('\\n')\n pos_stop=[]\n pos_no_stop=[]\n for i in range (len(pos)):\n p=tokenize(remove_punc_lower(pos[i]))\n pos_no_stop.append(p)\n p=remove_stopwords(p)\n pos_stop.append(p)\n train_list,val_list=train_test_split(pos_stop,train_size=0.8,random_state=0)\n val_list,test_list=train_test_split(val_list,test_size=0.5,random_state=0)\n np.savetxt(\"train.csv\", train_list, delimiter=\",\",fmt='%s')\n np.savetxt(\"val.csv\", val_list, delimiter=\",\", fmt='%s')\n np.savetxt(\"test.csv\", test_list, delimiter=\",\", fmt='%s')\n train_list_no_stopword,val_list_no_stopword=train_test_split(pos_no_stop,train_size=0.8,random_state=0)\n val_list_no_stopword,test_list_no_stopword=train_test_split(val_list_no_stopword,test_size=0.5,random_state=0)\n np.savetxt(\"train_no_stopword.csv\", train_list_no_stopword,delimiter=\",\", fmt='%s')\n np.savetxt(\"val_no_stopword.csv\", val_list_no_stopword,delimiter=\",\", fmt='%s')\n np.savetxt(\"test_no_stopword.csv\", test_list_no_stopword,delimiter=\",\", fmt='%s')\n", "sub_path": "Assignment_1/Shuo_NLP_Assignment_1.py", "file_name": "Shuo_NLP_Assignment_1.py", "file_ext": "py", "file_size_in_byte": 1822, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 14, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "171406468", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys,json\nimport json\n\nfrom jubatus.classifier.client import Classifier\nfrom jubatus.classifier.types import LabeledDatum\nfrom jubatus.common import Datum\nfrom sklearn.cross_validation import train_test_split\nimport numpy\nfrom itertools import izip\n\n\ndef parse_args():\n from optparse import OptionParser, OptionValueError\n p = OptionParser()\n p.add_option('-s', '--server_ip', action='store',\n dest='server_ip', type='string', default='127.0.0.1')\n p.add_option('-p', '--server_port', action='store',\n dest='server_port', type='int', default='9199')\n p.add_option('-n', '--name', action='store',\n dest='name', type='string', default='tutorial')\n return p.parse_args()\n\ndef get_most_likely(estm):\n ans = None\n prob = None\n result = {}\n result[0] = ''\n result[1] = 0\n for res in estm:\n if prob == None or res.score > prob :\n ans = res.label\n prob = res.score\n result[0] = ans\n result[1] = prob\n return result\n\ndef 
cross_validation_python():\n train_data = numpy.array([])\n train_label = numpy.array([])\n test_data = numpy.array([])\n test_label = numpy.array([])\n x_vector = []\n y_vector = []\n first_flag = 1 \n for line in open('election_data.json'):\n label, dat = line[:-1].split('\\t')\n y_vector.append(label)\n x_vector = numpy.array(dat)\n if first_flag == 1:\n train_data = numpy.hstack((train_data, x_vector))\n train_label = numpy.array(y_vector)\n first_flag = 0\n else:\n train_data = numpy.vstack((train_data, x_vector))\n train_label = numpy.array(y_vector)\n train_list = [train_data, train_label]\n return train_list\n\n\nif __name__ == '__main__':\n options, remainder = parse_args()\n\n classifier = Classifier(options.server_ip,options.server_port, options.name, 10.0)\n\n\n train_list = cross_validation_python()\n data_train, data_test, label_train, label_test = train_test_split(train_list[0], train_list[1])\n\n for label, dat in izip(label_train, data_train):\n data_dict = json.loads(dat[0])\n datum = Datum(data_dict)\n classifier.train([LabeledDatum(label, datum)])\n\n\n\n\n\n count_ok = 0\n count_ng = 0\n #for label, dat in izip(label_test, data_test):\n for line in open('j_c_2015.json'):\n label, dat = line[:-1].split('\\t')\n data_dict = json.loads(dat)\n datum = Datum(data_dict)\n ans = classifier.classify([datum])\n if ans != None:\n estm = get_most_likely(ans[0])\n if (estm[0] == \"1\"):\n result = \"OK\"\n print(dat)\n print(result + \",\" + label + \", \" + estm[0] + \", \" + str(estm[1]))\n count_ok += 1\n else:\n result = \"NG\"\n count_ng += 1\n print(\"===================\")\n print(\"OK: {0}\".format(count_ok))\n print(\"NG: {0}\".format(count_ng))\n", "sub_path": "election_predict.py", "file_name": "election_predict.py", "file_ext": "py", "file_size_in_byte": 3049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "optparse.OptionParser", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "jubatus.classifier.client.Classifier", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 70, "usage_type": "call"}, {"api_name": "itertools.izip", "line_number": 72, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "jubatus.common.Datum", "line_number": 74, "usage_type": "call"}, {"api_name": "jubatus.classifier.types.LabeledDatum", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}, {"api_name": "jubatus.common.Datum", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "522424691", "text": "from selenium import webdriver\nimport time, sys\n\n#conf\nlogin = 'login'\npassword = 'password'\n\n\nchromedriver = 'C:\\\\Users\\\\Szymon\\\\Documents\\\\Python file\\\\pythonAttemptTwo\\\\chromedriver.exe'\nbrowser = 
webdriver.Chrome(chromedriver)\n\ndef logging():\n browser.get('https://gmail.com')\n \n url = browser.current_url\n print('Podawanie adresu...', end=' ')\n log = browser.find_element_by_id('identifierId')\n log.send_keys(login)\n button = browser.find_element_by_id('identifierNext')\n button.click()\n print('Pomyślne')\n\n while url == browser.current_url:\n time.sleep(1)\n\n print('Podawanie hasła...', end=' ')\n try:\n passw = browser.find_element_by_xpath(\"//input[@class='whsOnd zHQkBf']\")\n passw.send_keys(password)\n button = browser.find_element_by_xpath(\"//span[@class='RveJvd snByac']\")\n button.click()\n except:\n return 0\n print('Pomyślne')\n #waiting for page reload\n x = 0\n while not browser.current_url.endswith('inbox'):\n time.sleep(1)\n x += 1\n if x == 8:\n return 0\n\n return 1\n\ndef creatingNew(destination, topic, message):\n button = browser.find_element_by_xpath(\"//div[@class='T-I J-J5-Ji T-I-KE L3']\")\n button.click()\n\n while not browser.current_url.endswith('compose=new'):\n time.sleep(1)\n print('Podawanie wiadomości')\n\n adress = browser.find_element_by_xpath(\"//textarea[@class='vO']\")\n adress.send_keys(destination)\n adress = browser.find_element_by_xpath(\"//input[@class='aoT']\")\n adress.send_keys(topic)\n adress = browser.find_element_by_xpath(\"//div[@class='Am Al editable LW-avf tS-tW']\")\n adress.send_keys(message)\n print('Podano wiadomość, czy chcesz wysłać wiadowmość do: %s? Tak lub nie' % messAdress)\n if input().lower() == 'nie':\n return 0\n button = browser.find_element_by_xpath(\"//div[@class='T-I J-J5-Ji aoO v7 T-I-atl L3']\")\n button.click()\n print('Wiadomość wysłano pomyślnie!')\n return 1\n\n\n#for testing purposes\nif len(sys.argv) != 4:\n print('Wymagany adresat, temat oraz treść')\n messAdress = 'adress@gmai.com'\n messTopic = 'Wiadomość testowa'\n messText = '1/n2'\nelse:\n messAdress = sys.argv[2]\n messTopic = sys.argv[3]\n messText = sys.argv[4]\n\n\nwhile logging() == 0:\n print('Błąd logowania')\n browser.get('https://accounts.google.com/AccountChooser?service=mail&continue=https://mail.google.com/mail/')\n logging()\n if browser.current_url.endswith('inbox'):\n break\nprint('Logowanie pomyślne')\n\ncreatingNew(messAdress, messTopic, messText)\n\n\n\n\n", "sub_path": "Chapter 11/postLog.py", "file_name": "postLog.py", "file_ext": "py", "file_size_in_byte": 2616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "289945527", "text": "import cv2\nimport numpy as np\nimport os\n\nprint(os.getcwd())\n\ndef detect(): \n \n video_file = os.path.join(os.getcwd(), 'angelapp/modules/classification/Data/video.mp4')\n\n video = cv2.VideoCapture(video_file)\n\n count = 0\n num_frames = 0\n (grabbed, frame) = video.read()\n print(frame.shape)\n # fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n out = 
cv2.VideoWriter(os.path.join(os.getcwd(), 'angelapp/media/output.avi'), -1, 20.0, (frame.shape[1], frame.shape[0]))\n\n while True:\n (grabbed, frame) = video.read()\n if not grabbed:\n break\n\n num_frames += 1\n\n blur = cv2.GaussianBlur(frame, (21, 21), 0)\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n \n lower = [18, 50, 50]\n upper = [35, 255, 255]\n lower = np.array(lower, dtype=\"uint8\")\n upper = np.array(upper, dtype=\"uint8\")\n mask = cv2.inRange(hsv, lower, upper)\n output = cv2.bitwise_and(frame, hsv, mask=mask)\n out.write(output)\n no_red = cv2.countNonZero(mask)\n # cv2.imshow(\"output\", output)\n #print(\"output:\", frame)\n # print('asdasdasada')\n if int(no_red) > 20000:\n count += 1\n # print ('Fire detected')\n #print(int(no_red))\n #print(\"output:\".format(mask))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n percent_fire = (count / num_frames) * 100\n if (percent_fire >= 10):\n print(\"Fire Detected\")\n\n cv2.destroyAllWindows()\n # video.release()\n out.release()\n\n print('yooo', out)\n return (out, percent_fire >= 10)\n", "sub_path": "Server/angelapp/modules/fire_detection.py", "file_name": "fire_detection.py", "file_ext": "py", "file_size_in_byte": 1598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.getcwd", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.countNonZero", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "98975878", "text": "#\n# Find Salary Day for any year, based on user input\n# Author: Vignesh Narasimhulu\n#\n# n is salary day if n falls on Monday to Friday\n# (n-1) is salary day if n is Saturday\n# (n-2) is salary day if n is Sunday\n#\n\n#work with days, week, month\nimport calendar\nimport datetime\nfrom datetime import date\nfrom datetime import time\nfrom datetime import datetime\nfrom datetime import timedelta\n\n#regex match\nimport re\n\n#use sys to exit\nimport sys\n\n#################\n##FUNCTION BEGIN\n#################\n# leap year function - not used in this script to take any action, but just to print on screen\ndef leap(x):\n p=re.compile('.000')\n m=p.match(str(x))\n if m:\n #print(\"1000 year pattern matched\")\n y=int(x)%400\n else:\n #print (\"1000 year pattern not matched\")\n y=int(x)%4\n\n if (y == 0):\n #print 
(\"leap\")\n return 0\n else:\n #print (\"noleap\")\n return 1\n\n#################\n##FUNCTION END\n#################\n\n###########################\n## MAIN Script beings here\n###########################\ntoday = date.today()\n#determine which year we are in\nyear = today.year\nlcheck=leap(year)\nif (lcheck == 0):\n print (\"We are in a leap year: \" +str(year) + \"\\n\")\nelse:\n print (\"We are not in a Leap year: \" +str(year) + \"\\n\")\n\n#print(\"The current year is: \" + str(year) + \"\\n\")\nprint (\"Assumptions:\\n1. If your pay day falls on weekday, salary will be credited on the same day\\n2. If the pay day falls on weekend, salary will be credited on Friday.\\n\")\n\n#define the pay day\ntry:\n payday=int(input(\"What is your usual pay day? [1-31]: \"))\nexcept ValueError:\n print(\"Please re-run the script and provide a valid integer as input. Thanks!\")\n sys.exit(\"Quitting\")\n\nif re.match(\"^(3[01]|[12][0-9]|[1-9])$\",str(payday)): \n print(\"Your usual pay day is: \" + str(payday) + \"\\n\")\nelse:\n print(\"Please provide a valid day of the month [1-31]\\n\")\n sys.exit()\n\n#calendar.weekday(year, month, day)\nprint(\"In the year \" + str(year) + \", Salary will be credited on: \")\nfor m in range(1,13):\n month=calendar.month_name[m]\n lastday=calendar.monthrange(year,m)[1]\n #print (\"Last day is: \" + str(month) + str(lastday))\n diff=payday-lastday\n if (diff > 0):\n #print(\"As this month \" +str(month) +\" has less days than usual pay day, Pay for this month will be received on the last working day of the month\\n\")\n cday=calendar.weekday(year, m, lastday)\n if (cday == 6):\n pday=int(lastday-2)\n print(\"%10s %2d\" % (month, pday))\n continue\n elif (cday == 5):\n pday=int(lastday-1)\n print(\"%10s %2d\" % (month, pday))\n continue\n else:\n pday=lastday\n print(\"%10s %2d\" % (month, pday))\n continue\n #else:\n #print(str(month) + \": proceed with next\")\n\n #Determine first 3 letters of the month to compare it later with salary day's month\n smo=month[0:3]\n #output of below command is a number, where 0 is Monday\n day=calendar.weekday(year, m, payday)\n #print(day)\n # days=[\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\",\"Sun\"]\n if (day == 6):\n #using timedelta approach is foolproof if salary day is 2 or 1\n xday=datetime(year, m, payday)\n yday=timedelta(days=2)\n #yyyy-mm-dd hh:mm:ss 2018-03-10 00:00:00\n tpday=xday-yday\n #strftime converts datetime to string in a readable format\n xpday=tpday.strftime('%d%b')\n ypday=xpday[2:5]\n if ( smo == ypday):\n pday=xpday[0:2]\n else:\n pday=xpday\n print(\" \" +str(month)+ \" \" +str(pday))\n elif (day == 5):\n xday=datetime(year, m, payday)\n yday=timedelta(days=1)\n tpday=xday-yday\n xpday=tpday.strftime('%d%b')\n ypday=xpday[2:5]\n if ( smo == ypday):\n pday=xpday[0:2]\n else:\n pday=xpday\n print(\" \" +str(month)+ \" \" +str(pday))\n else:\n pday=payday\n print(\"%10s %2d\" % (month, pday))\n", "sub_path": "salary_day.py", "file_name": "salary_day.py", "file_ext": "py", "file_size_in_byte": 3712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 52, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "re.match", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 75, 
"usage_type": "call"}, {"api_name": "calendar.month_name", "line_number": 80, "usage_type": "attribute"}, {"api_name": "calendar.monthrange", "line_number": 81, "usage_type": "call"}, {"api_name": "calendar.weekday", "line_number": 86, "usage_type": "call"}, {"api_name": "calendar.weekday", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "57824548", "text": "import http.server\nimport urllib.parse as urlparse\nimport sqlite3\nimport json\nimport search as search\n\n\nclass MyHttpSearch(http.server.BaseHTTPRequestHandler):\n def do_GET(self):\n url = urlparse.urlparse(self.path)\n path = url.path\n query_dict = urlparse.parse_qs(url.query)\n if path == '/':\n self._set_headers(200, 'ok', 'text/html')\n html = self.get_html_page_form()\n self.wfile.write(bytes(html, 'UTF-8'))\n elif path == '/result':\n if 'q' in query_dict.keys() and 'f' in query_dict.keys():\n if query_dict['f'][0] == 'json':\n self._set_headers(200, 'ok', 'application/json')\n self.wfile.write(bytes(json.dumps(self.get_data(query_dict['q'][0])), 'UTF-8'))\n elif query_dict['f'][0] == 'html':\n self._set_headers(200, 'ok', 'text/html')\n data = self.get_data(query_dict['q'][0])\n html = self.get_html_page_result(query_dict['q'][0], data)\n self.wfile.write(bytes(html, 'UTF-8'))\n elif path == '/search':\n if 'c_name' in query_dict.keys():\n self.send_response(302)\n self.send_header('Location', '/result?q=' + query_dict['c_name'][0] + '&f=html')\n self.end_headers()\n else:\n self._set_headers(200, 'ok', 'text/plain')\n self.wfile.write(bytes(\"you asked for \" + path[1:], 'UTF-8'))\n\n def get_data(self, c_name):\n conn = sqlite3.connect(\"../data/scorelib.dat\")\n cur = conn.cursor()\n data = search.getScores(cur, c_name)\n conn.commit()\n cur.close()\n conn.close()\n return data\n\n def get_html_page_result(self, c_name, data):\n template = \"\"\"\n \n

<html>\n        <head>\n        <title>Composers and scores for text: %s</title>\n        </head>\n        <body>\n        <ul>\n    \"\"\"\n\n        for val in data:\n            scores_items = \"\"\n            for score in val['scores']:\n                scores_items += \"\"\"\n                <li>\n                    %s\n                </li>\n                \"\"\" % (score['name'], )\n\n            scores = \"<ul>      %s    </ul>\" % (scores_items, )\n\n            template += \"\"\"\n            <li>\n                %s\n                %s\n            </li>\n            \"\"\" % (val['composer'], scores)\n\n        template += \"\"\"\n        </ul>\n        <p>\n            <a href=\"/\">Back to home</a>\n        </p>\n        </body>\n        </html>\n\"\"\"\n\n        html = template % (c_name, )\n        return html\n\n    def get_html_page_form(self):\n        template = \"\"\"\n        <html>\n        <body>\n        <h1>Which composer you want? (substring)</h1>\n        <form action=\"/search\">\n            <input type=\"text\" name=\"c_name\">\n            <input type=\"submit\" value=\"Search\">\n        </form>\n        </body>\n        </html>
\n \n \"\"\"\n\n html = template % ( )\n return html\n\n def _set_headers(self, code, code_message, content_type=None):\n self.protocol_version = 'HTTP/1.1'\n self.send_response(code, code_message)\n if content_type is not None:\n self.send_header('Content-type', content_type)\n self.end_headers()\n\nif __name__ == \"__main__\":\n port = 8000\n print('Listening on localhost:%s' % port)\n server = http.server.HTTPServer(('', port), MyHttpSearch)\n server.serve_forever()\n\n", "sub_path": "src/http-server-search.py", "file_name": "http-server-search.py", "file_ext": "py", "file_size_in_byte": 3411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "http.server.server", "line_number": 8, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 8, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 10, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qs", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 12, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 37, "usage_type": "call"}, {"api_name": "search.getScores", "line_number": 39, "usage_type": "call"}, {"api_name": "http.server.server.HTTPServer", "line_number": 105, "usage_type": "call"}, {"api_name": "http.server.server", "line_number": 105, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "443270784", "text": "from flask import render_template, redirect, url_for, request, jsonify\nfrom . import bp\nfrom app.models import Cart, Category, Product, Order, User, OrderProduct, db, Address\nfrom flask_login import login_user, logout_user, current_user, login_required\nimport uuid\nimport hashlib\n\n\"\"\"\nRender Checkout page only if user is logged in, else\nRender login page\n\"\"\"\n@bp.route(\"/checkout\", methods=[\"GET\"])\n@login_required\ndef checkout_page():\n cart_items = Cart.query.all()\n total = []\n cart_products = []\n for item in cart_items:\n product = Product.query.filter_by(id=item.product_id).first()\n # Calculate product subtotal\n subtotal = item.quantity * product.serialize[\"price\"]\n total.append(subtotal)\n cart_products.append({\n \"name\": product.serialize[\"name\"],\n \"id\": product.serialize[\"id\"],\n \"price\": product.serialize[\"price\"],\n \"image_url\": product.serialize[\"image_url\"],\n \"description\": product.serialize[\"description\"],\n \"quantity\": item.quantity,\n \"subtotal\": item.quantity * product.serialize[\"price\"]\n })\n uuid_random = uuid.uuid4()\n text = f\"4Vj8eK4rloUd272L48hsrarnUA~508029~{str(uuid_random)}~{sum(total)}~USD\"\n signature = hashlib.md5(text.encode()).hexdigest()\n url = request.url[:-8]\n return render_template(\"orders/checkout.html\",\n cart_items=cart_products, total=sum(total),\n siz=len(cart_products),uuid_random = uuid_random, signature = signature , url = url)\n\n\"\"\"\nCreate new Order\n\"\"\"\n@bp.route(\"/orders/response\", methods=['GET'])\ndef PayUTest ():\n status = str(request.args.get('lapTransactionState'))\n message = request.args.get(\"message\")\n processingDate = request.args.get(\"processingDate\")\n\n msg_headers = {\"h1\": \"\", \"h2\":\"\"}\n if status == \"APPROVED\":\n msg_headers[\"h1\"] = \"Transaction accepted\"\n msg_headers[\"h2\"] = \"Thank You For Buying from 
Us.\"\n elif status == \"DECLINED\":\n msg_headers[\"h1\"] = \"Transaction declined\"\n msg_headers[\"h2\"] = \"Please tried to make the payment later.\"\n elif status == \"PENDING\":\n msg_headers[\"h1\"] = \"Pending transaction\"\n msg_headers[\"h2\"] = \"Please check the status of the transaction in 10 min.\"\n pass\n return render_template(\"orders/order_complete.html\", status = status, processingDate = processingDate, message = message, msg_headers = msg_headers) \n \n@bp.route(\"/orders/create\", methods=['POST'])\ndef create_order():\n status0 = str(request.form['state_pol'])\n status1 = str(request.form['response_message_pol'])\n merchant_id = str(request.form['merchant_id'])\n sign = str(request.form['sign'])\n currency = str(request.form['currency'])\n email_buyer = str(request.form['email_buyer'])\n\n print(\"PayU POST confirmation was received\\n\")\n print(f\"Status : {status1} {status0}\")\n print(f\"merchant_id: {merchant_id}\")\n print(f\"sign: {sign}\")\n print(f\"currency: {currency}\")\n print(f\"email_buyer: {email_buyer} \\n\")\n\n if status1 == \"APPROVED\":\n print(\"Saving orden in databese ...\")\n payment_method = request.form[\"lapPaymentMethodType\"]\n country = request.form[\"shipping_address\"]\n zip_code = str(11111)\n city = request.form[\"shipping_city\"]\n user_id = User.query.filter_by(email=request.form[\"email_buyer\"]).first().id\n\n # Create address object that will be saved in Order\n # Object\n address = Address(\n city=city,\n country=country,\n zip_code=zip_code)\n\n Address.insert(address)\n\n cart_items = Cart.query.all()\n\n total = []\n \"\"\"\n Loop through cart items for given user \n and calculate Order Total Amount\n \"\"\"\n for item in cart_items:\n product = Product.query.filter_by(id=item.product_id).first()\n subtotal = item.quantity * product.serialize[\"price\"]\n total.append(subtotal)\n\n order = Order(user_id=user_id,\n payment_method=payment_method,\n total_amount=sum(total),\n address_id=address.id)\n\n Order.insert(order)\n products = [ci for ci in cart_items]\n\n order_products = []\n for prod in products:\n order_item = OrderProduct(\n order_id=order.id,\n product_id=prod.product_id,\n quantity=prod.quantity,\n user_id=user_id\n )\n OrderProduct.insert(order_item)\n # Clear Cart after Order has been created\n try:\n num_rows_deleted = db.session.query(Cart).delete()\n db.session.commit()\n print(num_rows_deleted)\n except Exception as e:\n db.session.rollback()\n print(order_products)\n else:\n print(f\"Transaction declined or pending {status1} : {status0}\")\n response = {\"message\": \"successful\"}\n return jsonify(response)\n\n# List all user orders in db\n@bp.route(\"/orders\", methods=['GET'])\n@login_required\ndef get_orders():\n orders = Order.query.filter_by(user_id=current_user.id).all()\n orders_data = []\n for order in orders:\n order_prod = OrderProduct.query.filter_by(order_id=order.id).first()\n if order_prod:\n product = Product.query.filter_by(id=order_prod.product_id).first()\n temp = {\n \"name\": product.name,\n \"price\": product.price,\n \"quantity\": order_prod.quantity,\n \"order_date\": order.order_date.strftime(\"%b %d %Y %H:%M\"),\n \"total_amount\": order.total_amount\n }\n orders_data.append(temp)\n print(order.id)\n print(orders_data)\n return render_template(\"orders/orders.html\",\n orders=orders_data, siz=len(orders_data))\n\n# Render Completion page\n@bp.route(\"/orders/complete\")\ndef completed_order():\n return render_template(\"orders/order_complete.html\")\n", "sub_path": 
"app/routes/orders.py", "file_name": "orders.py", "file_ext": "py", "file_size_in_byte": 6070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "app.models.Cart.query.all", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.Cart.query", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.models.Cart", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.Product.query.filter_by", "line_number": 19, "usage_type": "call"}, {"api_name": "app.models.Product.query", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 19, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 32, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "app.models.User.query.filter_by", "line_number": 84, "usage_type": "call"}, {"api_name": "app.models.User.query", "line_number": 84, "usage_type": "attribute"}, {"api_name": "app.models.User", 
"line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "app.models.Address", "line_number": 88, "usage_type": "call"}, {"api_name": "app.models.Address.insert", "line_number": 93, "usage_type": "call"}, {"api_name": "app.models.Address", "line_number": 93, "usage_type": "name"}, {"api_name": "app.models.Cart.query.all", "line_number": 95, "usage_type": "call"}, {"api_name": "app.models.Cart.query", "line_number": 95, "usage_type": "attribute"}, {"api_name": "app.models.Cart", "line_number": 95, "usage_type": "name"}, {"api_name": "app.models.Product.query.filter_by", "line_number": 103, "usage_type": "call"}, {"api_name": "app.models.Product.query", "line_number": 103, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 103, "usage_type": "name"}, {"api_name": "app.models.Order", "line_number": 107, "usage_type": "call"}, {"api_name": "app.models.Order.insert", "line_number": 112, "usage_type": "call"}, {"api_name": "app.models.Order", "line_number": 112, "usage_type": "name"}, {"api_name": "app.models.OrderProduct", "line_number": 117, "usage_type": "call"}, {"api_name": "app.models.OrderProduct.insert", "line_number": 123, "usage_type": "call"}, {"api_name": "app.models.OrderProduct", "line_number": 123, "usage_type": "name"}, {"api_name": "app.models.db.session.query", "line_number": 126, "usage_type": "call"}, {"api_name": "app.models.Cart", "line_number": 126, "usage_type": "argument"}, {"api_name": "app.models.db.session", "line_number": 126, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 126, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 127, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 127, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 127, "usage_type": "name"}, {"api_name": "app.models.db.session.rollback", "line_number": 130, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 130, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "app.models.Order.query.filter_by", "line_number": 141, "usage_type": "call"}, {"api_name": "app.models.Order.query", "line_number": 141, "usage_type": "attribute"}, {"api_name": "app.models.Order", "line_number": 141, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 141, "usage_type": "name"}, {"api_name": "app.models.OrderProduct.query.filter_by", "line_number": 144, "usage_type": "call"}, {"api_name": "app.models.OrderProduct.query", "line_number": 144, "usage_type": "attribute"}, {"api_name": "app.models.OrderProduct", "line_number": 144, "usage_type": "name"}, {"api_name": "app.models.Product.query.filter_by", "line_number": 146, "usage_type": "call"}, {"api_name": "app.models.Product.query", "line_number": 146, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 157, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 163, "usage_type": "call"}]} +{"seq_id": 
"190844064", "text": "import boto3\nimport click\nimport os\nimport re\nimport sys\nimport traceback\nimport logging\n\nfrom email.message import EmailMessage\nfrom email.mime.image import MIMEImage\nfrom costreport.accountconfig import AccountConfig\nfrom costreport.scope import Week, Month\nfrom costreport.dftools \\\n import read_ri_prices_csv, read_od_prices_csv\nfrom costreport.template import Template\nfrom costreport.team import Team\nfrom costreport.accountreport import AccountReport\n\nlogging.basicConfig(level=logging.WARN)\n\nTEMPLATES = Template()\n#\nOD_PRICES_CSV = \"on_demand_prices.csv\"\nRI_PRICES_CSV = \"reserved_instance_prices.csv\"\n#\nSPACES = \"   \"\nBOTO3 = boto3.client('ses', 'eu-west-1')\n#\n\n\ndef get_scope(scope_type, serial, csv_dir, rep_dir):\n current = False\n if scope_type == \"week\":\n scope = Week(serial, current, csv_dir, rep_dir)\n elif scope_type == \"month\":\n scope = Month(serial, current, csv_dir, rep_dir)\n else:\n scope = Week(serial, current, csv_dir, rep_dir)\n return scope\n\n\ndef check_or_create(directory, default):\n # not defined? use default\n if not directory:\n directory = default\n # does not exist? create\n if not os.path.exists(directory):\n os.makedirs(default)\n # no slash at the end? append\n if directory[-1:] != '/':\n directory += '/'\n return directory\n\n\ndef sendmail(subject, sender, receiver, default_receiver, message):\n message['From'] = sender\n message['To'] = receiver\n message['Bcc'] = default_receiver\n message['Subject'] = subject\n # noinspection PyBroadException\n try:\n BOTO3.send_raw_email(\n Destinations=[receiver, default_receiver],\n RawMessage={\n 'Data': message.as_string()\n }\n )\n except:\n traceback.print_exc()\n print('an exception occured while trying to send the report \"{s}\" to \"{r}\"'.format(s=subject, r=receiver))\n\n\ndef combine(body, attachments):\n message = EmailMessage()\n message.add_related(body, subtype='html')\n for attachment in attachments:\n cid = attachment['cid']\n buffer = attachment['buffer']\n img = MIMEImage(buffer.read(), _subtype='png')\n img.add_header('Content-ID', cid)\n message.attach(img)\n return message\n\n\ndef get_editorial(scope, entity_name):\n html = ''\n editorial = 'editorial_{scope}.txt'.format(scope=scope.as_filename())\n editorial = os.path.join(os.path.dirname(__file__), editorial)\n # print(\"editorial\", editorial)\n if os.path.isfile(editorial):\n with open(editorial, 'r') as f:\n html = f.read().replace('\\n', '')\n html = html.format(spaces=SPACES, team=entity_name)\n return html\n\n\ndef generate_summary(account_ids, account_reports, config):\n summary_rows = map(lambda account_id: TEMPLATES.get('summary-row', {\n 'account_id': account_id,\n 'cost_rank': account_reports[account_id].get_cost_rank(),\n 'account_name': config.get_linked_account_name(account_id),\n 'total_cost': TEMPLATES.format(account_reports[account_id].get_effective_cost())\n }),\n account_ids)\n summary_rows_html = ''\n for row in summary_rows:\n summary_rows_html += row\n sum_total = sum(map(lambda account: account.get_effective_cost(), account_reports.values()))\n summary = TEMPLATES.get('summary', {\n 'summary_rows': summary_rows_html,\n 'sum_total_cost': TEMPLATES.format(sum_total)\n })\n return summary\n\n\ndef generate_accounts(account_ids, account_reports):\n html = ''\n for account_id in account_ids:\n account_report = account_reports[account_id]\n html += account_report.generate_html()\n return html\n\n\ndef generate_message_body(account_ids, account_reports, scope, 
config, entity_name_value, send_from, layout_images):\n header = TEMPLATES.get('header', {\n 'scope': scope,\n 'header_image': layout_images['header']['src']\n })\n intro = TEMPLATES.get('intro', {\n 'team': entity_name_value,\n 'scope': scope,\n 'google_groups_name': 'team-' + entity_name_value,\n 'editorial': get_editorial(scope, entity_name_value)\n })\n summary = generate_summary(account_ids, account_reports, config)\n accounts = generate_accounts(account_ids, account_reports)\n footer = TEMPLATES.get('footer', {\n 'sender_email': send_from,\n 'byebye_image': layout_images['byebye']['src']\n })\n document = TEMPLATES.get('document', {\n 'header': header,\n 'intro': intro,\n 'summary': summary,\n 'accounts': accounts,\n 'footer': footer,\n 'notes': TEMPLATES.get('notes'),\n 'footer_image': layout_images['footer']['src']\n })\n return document\n\n\ndef generate_message_attachments(account_ids, account_reports, layout_images):\n attachments = []\n for account_id in account_ids:\n attachments.extend(account_reports[account_id].attachments)\n attachments.extend(layout_images.values())\n return attachments\n\n\ndef generate_account_reports(account_ids, config, scope, rep_dir):\n account_reports = {}\n for account_id in account_ids:\n account_name = config.get_linked_account_name(account_id)\n account_reports[account_id] = AccountReport(\n account_id, account_name, config, scope, rep_dir, TEMPLATES\n )\n return account_reports\n\n\ndef generate_message(account_ids, account_reports, scope, config, entity_name_value, send_from, layout_images):\n return combine(\n generate_message_body(\n account_ids, account_reports, scope, config, entity_name_value, send_from, layout_images\n ),\n generate_message_attachments(account_ids, account_reports, layout_images)\n )\n\n\ndef check_all_lock(scope, entity_name, rep_dir):\n lock = '{s}_{e}.sent'.format(s=scope.as_new_filename(), e=entity_name)\n subject = '{e} report for {s}'.format(e=entity_name, s=scope.as_new_filename())\n f = os.path.join(rep_dir, lock)\n if os.path.exists(f):\n print('CHECK File: {f} exists. setting to FALSE.'.format(f=f, s=subject))\n return False\n else:\n print('CHECK File: {f} not found. leaving as is.'.format(f=f, s=subject))\n return True\n\n\ndef create_all_lock(scope, entity_name, rep_dir):\n lock = '{s}_{e}.sent'.format(s=scope.as_new_filename(), e=entity_name)\n f = os.path.join(rep_dir, lock)\n open(f, \"a+\").close()\n print(\"lock {f} written\".format(f=f))\n\n\ndef log_sendmail_note(send_mail):\n print()\n if send_mail:\n print(\"--- SEND_MAIL ACTIVATED! ---\")\n else:\n print(\"--- no emails will be sent. ---\")\n print()\n\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n\n@click.command(context_settings=CONTEXT_SETTINGS)\n@click.option('--entity', envvar=\"ENTITY\")\n@click.option('--serial', envvar=\"SERIAL\")\n@click.option(\n '--scope',\n is_flag=False,\n metavar='SCOPE',\n envvar=\"SCOPE\",\n type=click.Choice([\"week\", \"month\"]),\n help='the scope of the report, must be week or month'\n)\n@click.option(\n '--entity-type',\n is_flag=False,\n metavar='TYPE',\n envvar=\"ENTITY_TYPE\",\n type=click.Choice([\"AccountName\", \"TeamName\", \"AccountId\", \"CostEntity\"]),\n help='entity type'\n)\n@click.option(\n '--send-to',\n metavar='TO',\n envvar=\"SEND_TO\",\n help='email address to send to. 
default: team email address'\n)\n@click.option(\n '--send-from',\n required=True,\n metavar='FROM',\n envvar=\"SEND_FROM\",\n help='email address to send from'\n)\n@click.option(\n '--base-dir',\n is_flag=False,\n metavar='BASE',\n envvar=\"BASE_DIR\",\n default=os.getenv(\"HOME\"),\n help='base directory'\n)\n@click.option(\n '--cfg-dir',\n is_flag=False,\n envvar=\"CFG_DIR\",\n metavar='CFGDIR',\n help='config directory. default: $BASE_DIR/cfg'\n)\n@click.option(\n '--csv-dir',\n is_flag=False,\n metavar='CSVDIR',\n envvar=\"CSV_DIR\",\n help='csv directory. default: $BASE_DIR/csv'\n)\n@click.option(\n '--rep-dir',\n is_flag=False,\n metavar='REPDIR',\n envvar=\"REP_DIR\",\n help='report directory. default: $BASE_DIR/rep'\n)\n@click.option(\n '--send-mail/--no-send-mail',\n is_flag=True,\n default=False,\n envvar=\"SEND_MAIL\",\n help='activate mail dispatch'\n)\n@click.option(\n '--gen-html',\n is_flag=True,\n envvar=\"GEN_HTML\",\n help='additionally generate html'\n)\n@click.option(\n '--debug',\n is_flag=True,\n default=False,\n envvar=\"DEBUG\",\n help='Writes mail to text file')\n@click.option(\n '--team-service-url',\n envvar=\"TEAM_SERVICE_URL\",\n help=\"Base URL to team service\"\n)\n@click.option(\n '--default-receiver',\n envvar=\"DEFAULT_RECEIVER\",\n required=True,\n help=\"Receiver of mail in case there is no address for an account available\"\n)\ndef main(\n entity, serial, scope, entity_type, send_to, send_from,\n base_dir, cfg_dir, csv_dir, rep_dir, send_mail, gen_html,\n debug, team_service_url, default_receiver\n):\n \"\"\"Summary\n\n Extended description\n\n Args:\n entity (str): value (or value pattern) of an entity to be reported. the range of valid\n values (or value patterns) depends on the entity type. valid and entity\n types are:\n 'stups' AccountName\n '1234567890' AccountId\n '00123456' CostEntityId\n 'stups' TeamName\n '13371337' TeamId\n as the value 'stups' suggests, different entity types can have the same\n value. for each entity type, the value 'all' represents all possible\n values of the entity type.\n serial (str): serial of the scope (below), like 2/2016 (week 2 or month 2 of year 2016)\n scope (str): type of the serial in case of ambiguities (like in 2/2016)\n entity_type (str): type of entity given. e.g. 
for team 'BI', this would be 'TeamName'\n send_to (str): send to somebody else instead of entity owner\n send_from (str):\n base_dir (str):\n cfg_dir (str):\n csv_dir (str):\n rep_dir (str):\n send_mail (bool): not only generate the necessary data, but also really send the report\n debug (bool): not only really send the mail, but also write raw mail to disk\n gen_html (bool):\n \"\"\"\n print(\"COMMAND \", \" \".join(sys.argv[:]))\n\n if base_dir[:-1] != '/':\n base_dir += '/'\n cfg_dir = check_or_create(cfg_dir, base_dir + 'cfg/')\n csv_dir = check_or_create(csv_dir, base_dir + 'csv/')\n rep_dir = check_or_create(rep_dir, base_dir + 'rep/')\n\n print(\"CFG DIR \", cfg_dir)\n print(\"CSV DIR \", csv_dir)\n print(\"REP DIR \", rep_dir)\n\n read_ri_prices_csv(cfg_dir+RI_PRICES_CSV)\n read_od_prices_csv(cfg_dir+OD_PRICES_CSV)\n\n scope = get_scope(scope, serial, csv_dir, rep_dir)\n scope.get_history_data()\n\n accounts_in_scope = scope.get_accounts()\n # in tests we pass a mocked Team object\n if isinstance(team_service_url, str):\n config = AccountConfig(accounts_in_scope, cfg_dir, Team(team_service_url))\n else:\n config = AccountConfig(accounts_in_scope, cfg_dir, team_service_url)\n\n entity_id_values = config.get_entity_ids(entity_type, entity)\n entity_id_type = str.replace(entity_type, 'Name', 'Id')\n entity_name = re.sub(\"(Name|Id)\", \"\", entity_type)\n\n # NOTE: this is a check to prevent sending a report to 'all' for a certain scope twice.\n if entity == 'all':\n should_send_mail = send_mail and check_all_lock(scope, entity_name, rep_dir)\n else:\n should_send_mail = send_mail\n log_sendmail_note(should_send_mail)\n\n # read images only once as they are in every mail\n layout_images = {\n 'header': TEMPLATES.get_image('header'),\n 'footer': TEMPLATES.get_image('footer'),\n 'byebye': TEMPLATES.get_image('byebye')\n }\n\n # noinspection PyBroadException\n try:\n for entity_id_value in entity_id_values:\n\n account_ids = config.get_accounts(entity_id_type, entity_id_value)\n entity_name_value = config.get_entity_name(entity_id_type, entity_id_value)\n\n print(\"ENTITY {en:<14s} ENTITY_TYPE {et} ACCOUNTS {a}\".format(\n en=entity_name_value, et=entity_type, a=account_ids\n ))\n\n account_reports = generate_account_reports(account_ids, config, scope, rep_dir)\n\n if should_send_mail:\n\n message = generate_message(\n account_ids, account_reports, scope, config, entity_name_value, send_from, layout_images\n )\n subject = 'AWS Cost Report for {scope}'.format(\n scope=scope\n )\n if debug:\n os.makedirs('./mail', exist_ok=True)\n with open('./mail/' + str(entity_name_value) + '.txt', 'w') as f:\n f.write(message.as_string())\n receiver = send_to if send_to is not None else config.get_receiver(entity_name, entity_id_value)\n receiver = default_receiver if receiver == \"DEFAULT\" else receiver\n sendmail(subject, send_from, receiver, default_receiver, message)\n print(\"SENT TO \", receiver)\n\n if gen_html:\n\n html = generate_message_body(\n account_ids,\n account_reports,\n scope,\n config,\n entity_name_value,\n send_from,\n layout_images\n )\n with open(os.path.join(base_dir, 'costreport.html'), 'w') as f:\n f.write(html)\n except Exception as e:\n logging.exception(\"Error: %s\", e)\n\n if entity == 'all':\n create_all_lock(scope, entity_name, rep_dir)\n print(\"DONE\")\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "costreport/costreport.py", "file_name": "costreport.py", "file_ext": "py", "file_size_in_byte": 14099, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 19, "usage_type": "attribute"}, {"api_name": "costreport.template.Template", "line_number": 21, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 27, "usage_type": "call"}, {"api_name": "costreport.scope.Week", "line_number": 34, "usage_type": "call"}, {"api_name": "costreport.scope.Month", "line_number": 36, "usage_type": "call"}, {"api_name": "costreport.scope.Week", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 69, "usage_type": "call"}, {"api_name": "email.message.EmailMessage", "line_number": 74, "usage_type": "call"}, {"api_name": "email.mime.image.MIMEImage", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "costreport.accountreport.AccountReport", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 336, "usage_type": "attribute"}, {"api_name": "costreport.dftools.read_ri_prices_csv", "line_number": 348, "usage_type": "call"}, {"api_name": "costreport.dftools.read_od_prices_csv", "line_number": 349, "usage_type": "call"}, {"api_name": "costreport.accountconfig.AccountConfig", "line_number": 357, "usage_type": "call"}, {"api_name": "costreport.team.Team", "line_number": 357, "usage_type": "call"}, {"api_name": "costreport.accountconfig.AccountConfig", "line_number": 359, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 363, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 420, "usage_type": "call"}, {"api_name": "os.path", "line_number": 420, "usage_type": "attribute"}, {"api_name": "logging.exception", "line_number": 423, "usage_type": "call"}, {"api_name": "click.command", "line_number": 211, "usage_type": "call"}, {"api_name": "click.option", "line_number": 212, "usage_type": "call"}, {"api_name": "click.option", "line_number": 213, "usage_type": "call"}, {"api_name": "click.option", "line_number": 214, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 219, "usage_type": "call"}, {"api_name": "click.option", "line_number": 222, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 227, "usage_type": "call"}, {"api_name": "click.option", "line_number": 230, "usage_type": "call"}, {"api_name": "click.option", "line_number": 236, "usage_type": "call"}, {"api_name": "click.option", 
"line_number": 243, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 248, "usage_type": "call"}, {"api_name": "click.option", "line_number": 251, "usage_type": "call"}, {"api_name": "click.option", "line_number": 258, "usage_type": "call"}, {"api_name": "click.option", "line_number": 265, "usage_type": "call"}, {"api_name": "click.option", "line_number": 272, "usage_type": "call"}, {"api_name": "click.option", "line_number": 279, "usage_type": "call"}, {"api_name": "click.option", "line_number": 285, "usage_type": "call"}, {"api_name": "click.option", "line_number": 291, "usage_type": "call"}, {"api_name": "click.option", "line_number": 296, "usage_type": "call"}]} +{"seq_id": "581304214", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport datetime\nfrom math import sqrt\nfrom numpy import split\nfrom numpy import array\nfrom sklearn.metrics import mean_squared_error\nfrom matplotlib import pyplot\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import LSTM\nfrom keras.layers import RepeatVector\nfrom keras.layers import TimeDistributed\nfrom keras.layers import Bidirectional\nfrom keras.layers import Dropout\nfrom keras.callbacks import EarlyStopping\nfrom keras import optimizers \nfrom keras.layers import Input\nfrom keras import layers\nfrom keras.models import Model\n\ndef plot_loss(r):\n plt.plot(r.history['loss'])\n plt.plot(r.history['val_loss'])\n plt.title('model train vs validation loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper right')\n plt.show()\n\ndef build_and_fit(train_x, train_x_demo, train_y_1, train_y_2, n_input, n_out, epochs_num, batch_size_num):\n \n #set parameters\n epochs, batch_size = epochs_num, batch_size_num\n n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y_1.shape[1]\n \n #reshape output into [samples, timesteps, features]\n train_y_1 = train_y_1.reshape((train_y_1.shape[0], train_y_1.shape[1], 1))\n train_y_2 = train_y_2.reshape((train_y_2.shape[0], train_y_2.shape[1], 1))\n \n #---define model---#\n #time_series\n time_series_input = Input(shape=(n_timesteps, n_features))\n lstm_main = layers.TimeDistributed(Dense(2))(time_series_input)\n \n #one hot matrix of states reshaped => shape similarly as time_series_input\n state_input = Input(shape=(n_out, 50))\n state_dense = layers.Dense(10)(state_input)\n state_dropout = layers.Dropout(0.2)(state_dense)\n \n #for confirmed\n h1_c, h2_c, c_c = layers.LSTM(32, inner_init='orthogonal', return_sequences=True, return_state = True)(lstm_main)\n decoder_c = layers.RepeatVector(n_out)(h2_c)\n decoder_c = layers.LSTM(32, dropout = 0.5, recurrent_dropout=0.2,\n return_sequences=True, return_state = False)(decoder_c, [h2_c, c_c])\n attention_c = layers.dot([decoder_c, h1_c], axes = [2,2])\n attention_c = layers.Activation('softmax')(attention_c)\n context_c = layers.dot([attention_c, h1_c], axes = [2,1])\n decoder_and_context_c = layers.Concatenate(axis=2)([context_c, decoder_c])\n merge_c = layers.Concatenate(axis=2)([decoder_and_context_c, state_dropout])\n dense_c = layers.TimeDistributed(Dense(10))(merge_c)\n dropout_c = layers.Dropout(0.5)(dense_c)\n output_c = layers.TimeDistributed(Dense(1))(merge_c)\n confirmed = layers.LeakyReLU(alpha=0.1, name = 'confirmed')(output_c)\n \n \n #for deaths\n h1_d, h2_d, c_d = layers.LSTM(32, inner_init='orthogonal', return_sequences=True, 
return_state = True)(lstm_main)\n decoder_d = layers.RepeatVector(n_out)(h2_d)\n decoder_d = layers.LSTM(32, dropout = 0.5, recurrent_dropout=0.2,\n return_sequences=True, return_state = False)(decoder_d, [h2_d, c_d])\n attention_d = layers.dot([decoder_c, h1_c], axes = [2,2])\n attention_d = layers.Activation('softmax')(attention_d)\n context_d = layers.dot([attention_d, h1_d], axes = [2,1])\n decoder_and_context_d = layers.Concatenate(axis=2)([context_d, decoder_d])\n merge_d = layers.Concatenate(axis=2)([decoder_and_context_d, state_dropout])\n dense_d = layers.TimeDistributed(Dense(10))(merge_d)\n dropout_d = layers.Dropout(0.5)(dense_d)\n output_d = layers.TimeDistributed(Dense(1))(merge_d)\n deaths = layers.LeakyReLU(alpha=0.1, name = 'deaths')(output_d)\n \n #put together and compile model\n model = Model([time_series_input,state_input], [confirmed,deaths])\n #opt = optimizers.Adam(lr = 0.001, clipnorm = 1.)\n model.compile(loss='mse', optimizer='Adam')\n \n #check shape\n print(train_x.shape)\n print(train_x_demo.shape)\n print(train_y_1.shape)\n print(train_y_2.shape)\n \n #fit model\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)\n record = model.fit([train_x,train_x_demo], [train_y_1, train_y_2], validation_split=0.2, callbacks = [es],\n epochs=epochs, batch_size=batch_size, verbose=0)\n #plot loss\n plot_loss(record)\n \n return model\n\ndef forecast(model, history, history_demo, n_in, n_out):\n # flatten data\n data = np.array(history)\n data = data.reshape((data.shape[0]*data.shape[1], data.shape[2]))\n # retrieve last observations for input data\n input_x = data[-n_in:, :]\n # reshape into [1, n_input, n]\n input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1]))\n # reshape into [1, n_input, num_of_features]\n input_x_demo = history_demo.reshape(1,50)\n input_x_demo = np.repeat(input_x_demo, n_out, axis=0)\n input_x_demo = input_x_demo.reshape((1, n_out, 50))\n # forecast \n yhat = model.predict([input_x, input_x_demo], verbose=0)\n yhat1 = yhat[0]\n yhat2 = yhat[1]\n return yhat1, yhat2\n\ndef run_model(train_x, train_x_demo, train_y_1, train_y_2, test, train_ls, demo_ls,\n n_input, n_out, epochs = 80, batch_size = 128):\n # build and fit model\n model = build_and_fit(train_x, train_x_demo, train_y_1, train_y_2, n_input, n_out, epochs, batch_size)\n predictions_1 = [[] for i in range(50)]\n predictions_2 = [[] for i in range(50)]\n # forcase in n_input days at a time, total 28/n_input iterations\n for i in range(int(28/n_out)):\n # forcast each state\n for j in range(50):\n yhat_sequence_1, yhat_sequence_2 = forecast(model, train_ls[j], demo_ls[j], n_input, n_out)\n predictions_1[j].append(yhat_sequence_1)\n predictions_2[j].append(yhat_sequence_2)\n yhat = np.concatenate((yhat_sequence_1.reshape(-1,1), yhat_sequence_2.reshape(-1,1)), axis = 1)\n yhat = yhat.reshape(-1,n_out,2) #2 is the number of features; should use 7 to replace n_out\n #for s in range(int(n_input/7)):\n # train_ls[j] = np.vstack((train_ls[j], yhat[0][s*7:(s+1)*7].reshape(-1, 7, 2))) #7 is base length\n # add newly predicted data to train data for next round of predictions\n train_ls[j] = np.vstack((train_ls[j], yhat))\n return predictions_1, predictions_2, model", "sub_path": "LSTM/seq2seq_attention.py", "file_name": "seq2seq_attention.py", "file_ext": "py", "file_size_in_byte": 6419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 48, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 52, "usage_type": "name"}, {"api_name": "keras.layers.Dropout", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 53, "usage_type": "name"}, {"api_name": "keras.layers.LSTM", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 56, "usage_type": "name"}, {"api_name": "keras.layers.RepeatVector", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 57, "usage_type": "name"}, {"api_name": "keras.layers.LSTM", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 58, "usage_type": "name"}, {"api_name": "keras.layers.dot", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 60, "usage_type": "name"}, {"api_name": "keras.layers.Activation", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 61, "usage_type": "name"}, {"api_name": "keras.layers.dot", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 62, "usage_type": "name"}, {"api_name": "keras.layers.Concatenate", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 63, "usage_type": "name"}, {"api_name": "keras.layers.Concatenate", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 64, "usage_type": "name"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 65, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 66, "usage_type": "name"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 67, "usage_type": "call"}, 
{"api_name": "keras.layers.LeakyReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.layers.LSTM", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 72, "usage_type": "name"}, {"api_name": "keras.layers.RepeatVector", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 73, "usage_type": "name"}, {"api_name": "keras.layers.LSTM", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 74, "usage_type": "name"}, {"api_name": "keras.layers.dot", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 76, "usage_type": "name"}, {"api_name": "keras.layers.Activation", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 77, "usage_type": "name"}, {"api_name": "keras.layers.dot", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 78, "usage_type": "name"}, {"api_name": "keras.layers.Concatenate", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 79, "usage_type": "name"}, {"api_name": "keras.layers.Concatenate", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 80, "usage_type": "name"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 81, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 82, "usage_type": "name"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 83, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 84, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "348657562", "text": "\"\"\" K-Means Clustering - Customer Segmentatino \"\"\"\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\n\ncust_df = pd.read_csv(\"Cust_Segmentation.csv\")\n#display(cust_df.head())\ndf=cust_df.drop('Address', axis=1) #dropping adress as not categorical data\ndisplay(df.head())\n\nfrom sklearn.preprocessing import StandardScaler\nX = df.values[:,1:]\nX = np.nan_to_num(X) #normalising data\nClus_dataSet = StandardScaler().fit_transform(X)\n#display(Clus_dataSet)\n\n#MODELLING - Applying kmeans\nfrom sklearn.cluster import KMeans\nclusterNum = 3\nk_means = KMeans(init = \"k-means++\", n_clusters = clusterNum, n_init = 12)\nk_means.fit(X)\nlabels = k_means.labels_\n#print(labels)\n\n#INSIGHTS\ndf[\"Clus_km\"] = labels #assigning labels to each row in the df\n#display(df.head(5))\ndisplay(df.groupby('Clus_km').mean()) #averages features of each cluster to check center value\n\n#analysing 
distribution of customer age / income\nimport matplotlib.pyplot as plt\narea = np.pi * (X[:, 1])*(X[:, 1])\nplt.scatter(X[:,0], X[:,3], s=area, c=labels.astype(np.float), alpha=0.5)\nplt.xlabel('Age', fontsize=16)\nplt.ylabel('Income', fontsize=16)\nplt.savefig(\"kmeans_scatter.pdf\")\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure(1, figsize=(8,6))\nplt.clf() #clears current fig\nax = Axes3D(fig, rect=[0,0,.95,1], elev=48, azim=134) #azim: azimuthal viewing angleA\nplt.cla() #clears axes\n\nax.set_xlabel('Education')\nax.set_ylabel('Age')\nax.set_zlabel('Income')\nax.scatter(X[:,1], X[:,0], X[:,3], c=labels.astype(np.float))\nplt.savefig(\"kmeans_3D.pdf\")\n\n", "sub_path": "kmeans.py", "file_name": "kmeans.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 20, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 47, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "402056948", "text": "# code utf-8\nfrom collections import defaultdict\n\n#求和的lambda表达式,很常用\nf=lambda x,y,z:x+y+z\nprint(f(1,3,4))\n\nurm = defaultdict(lambda :defaultdict(int))\n\nurm['a']['a']=1\nurm['a']['b']=2\nurm['a']['c']=3\nurm['a']['d']=4\nurm['a']['e']=5\nurm['a']['f']=6\nurm['a']['g']=7\n\nprint(urm)\n", "sub_path": "study/lambda.py", "file_name": "lambda.py", "file_ext": "py", "file_size_in_byte": 295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.defaultdict", "line_number": 8, 
"usage_type": "call"}]} +{"seq_id": "421342792", "text": "import re\nimport logging\nfrom lxml import etree\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import HIEProfile\nfrom ..accounts.models import UserProfile\n\n\nlogger = logging.getLogger('smh_debug')\n\n__author__ = \"Alan Viars\"\n\n\ndef write_key_to_filepath(filepath, env_to_write):\n # try and open the local file. Create it from an env var if it doesn't exist.\n # return the filepath\n try:\n f = open(filepath, 'r')\n f.close()\n except FileNotFoundError:\n f = open(filepath, 'w')\n f.write(env_to_write)\n f.close()\n return filepath\n\n\nNAMESPACES = {\n 'hl7': \"urn:hl7-org:v3\",\n 'enrollment': \"http://www.intersystems.com/hs/portal/enrollment\",\n}\n\n\ndef fetch_patient_data(user, hie_profile=None, user_profile=None):\n \"\"\"do what we need to do to fetch patient data from HIXNY, if possible, for the given user.\n returns values that can be used to update the user's HIEProfile\n \"\"\"\n logger.debug(\"fetch_patient_data(%r, hie_profile=%r, user_profile=%r)\"\n % (user, hie_profile, user_profile))\n result = {'responses': []}\n\n if hie_profile is None:\n hie_profile, created = HIEProfile.objects.get(user=user)\n if user_profile is None:\n user_profile, created = UserProfile.objects.get(user=user)\n\n if hie_profile.flag_dont_connect:\n result['cda_content'] = hie_profile.cda_content\n result['fhir_content'] = hie_profile.fhir_content\n else:\n # acquire an access token from the HIXNY server\n auth_response = acquire_access_token()\n if auth_response['error_message'] is not None:\n result['error'] = auth_response['error_message']\n return result\n access_token = auth_response['access_token']\n\n # if the member hasn't been enrolled (no HIEProfile.mrn), try to enroll\n if not hie_profile.mrn:\n logger.debug(\"No MRN\")\n # try to find the member\n search_data = patient_search(access_token, user_profile)\n if 'response_body' in search_data:\n result['responses'].append(search_data['response_body'])\n\n if search_data.get('mrn'):\n # member found, already has portal account\n hie_profile.mrn = search_data['mrn']\n hie_profile.save()\n\n elif not (\n search_data.get('error')\n or search_data.get('status') == 'ERROR'\n and search_data.get('notice')\n ):\n # member found\n hie_profile.terms_accepted = search_data.get('terms_accepted')\n hie_profile.terms_string = search_data.get('terms_string')\n hie_profile.stageuser_password = search_data.get(\n 'stageuser_password')\n hie_profile.stageuser_token = search_data.get(\n 'stageuser_token')\n hie_profile.save()\n\n # try to stage/activate the member\n activated_member_data = activate_staged_user(\n access_token, hie_profile, user_profile\n )\n print('activated_member_data:', activated_member_data)\n if 'response_body' in activated_member_data:\n result['responses'].append(\n activated_member_data['response_body'])\n\n if (\n activated_member_data.get('mrn')\n and activated_member_data['status'] == 'success'\n ):\n hie_profile.mrn = activated_member_data['mrn']\n hie_profile.save()\n\n print(\n {k: v for k, v in hie_profile.__dict__.items() if k[0] != '_'})\n\n # if the consumer directive checks out, get the clinical data and store\n # it\n logger.debug(\"MRN Set\")\n directive = consumer_directive(access_token, hie_profile, user_profile)\n if 'response_body' in directive:\n result['responses'].append(directive['response_body'])\n\n if directive['status'] == 
\"OK\":\n document_data = get_clinical_document(access_token, hie_profile)\n if 'response_body' in document_data:\n result['responses'].append(document_data['response_body'])\n\n result['cda_content'] = document_data['cda_content']\n result['fhir_content'] = document_data['fhir_content']\n else:\n result['error'] = \"Clinical data could not be loaded.\"\n if settings.DEBUG and directive.get('error'):\n result['error'] += \" (%s)\" % directive['error'] or ''\n\n logger.debug(\"result = %r\" % (result))\n return result\n\n\ndef acquire_access_token():\n \"\"\"establish a connection to the HIXNY service;\n returns JSON containing an access token on successful connection\n \"\"\"\n data = {\n \"grant_type\": \"password\",\n \"username\": settings.HIE_WORKBENCH_USERNAME,\n \"password\": settings.HIE_WORKBENCH_PASSWORD,\n \"scope\": \"/PHRREGISTER\",\n }\n response = requests.post(\n settings.HIE_TOKEN_API_URI,\n cert=(\n write_key_to_filepath(\n settings.HIE_CLIENT_CERT_FILEPATH, settings.HIE_CLIENT_CERT\n ),\n write_key_to_filepath(\n settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH,\n settings.HIE_CLIENT_PRIVATE_KEY,\n ),\n ),\n data=data,\n verify=False,\n auth=HTTPBasicAuth(settings.HIE_BASIC_AUTH_USERNAME,\n settings.HIE_BASIC_AUTH_PASSWORD),\n )\n response_json = response.json()\n logger.debug(response_json)\n if 'access_token' not in response_json:\n access_token = None\n error_message = _(\n \"We're sorry. We could not connect to HIE. Please try again later.\"\n )\n if settings.DEBUG is True:\n error_message += \" DATA=%s response=%s TOKEN_URI=%s BASIC_AUTH=%s\" % (\n data,\n response_json,\n settings.HIE_TOKEN_API_URI,\n settings.HIE_BASIC_AUTH_PASSWORD,\n )\n else:\n access_token = response_json['access_token']\n error_message = None\n\n return {'access_token': access_token, 'error_message': error_message}\n\n\ndef patient_search(access_token, user_profile):\n \"\"\"search for a patient with the given profile; if found, return \"\"\"\n # If paitent was created before verifying email added, append default\n auditEmail = \"\"\n if user_profile.verifying_agent_email == \"\":\n auditEmail = settings.HIE_WORKBENCH_USERNAME\n else:\n auditEmail = user_profile.verifying_agent_email\n\n patient_search_xml = \"\"\"\n \n %s\n %s\n %s\n %s\n %s\n \n \n \n \n \n \n \n \n \n \n %s\n 1\n \n \"\"\" % (\n user_profile.gender_intersystems,\n user_profile.birthdate_intersystems,\n user_profile.user.last_name,\n user_profile.user.first_name,\n user_profile.middle_name,\n auditEmail,\n )\n logger.debug(\"patient search payload = %r\" % (patient_search_xml))\n\n response = requests.post(\n settings.HIE_PHRREGISTER_API_URI,\n cert=(\n write_key_to_filepath(\n settings.HIE_CLIENT_CERT_FILEPATH, settings.HIE_CLIENT_CERT\n ),\n write_key_to_filepath(\n settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH,\n settings.HIE_CLIENT_PRIVATE_KEY,\n ),\n ),\n verify=False,\n headers={\n 'Content-Type': 'application/xml',\n 'Authorization': \"Bearer %s\" % (access_token),\n },\n data=patient_search_xml,\n )\n\n response_xml = etree.XML(response.content)\n result = {\"response_body\": etree.tounicode(\n response_xml, pretty_print=True)}\n logger.debug(\"response body = %r\" % (result['response_body']))\n\n for element in response_xml:\n if element.tag == \"{%(hl7)s}Notice\" % NAMESPACES:\n result['error'] = element.text\n for e in element.getchildren():\n if e.tag == \"{%(hl7)s}Status\" % NAMESPACES:\n result['status'] = e.text\n if e.tag == \"{%(hl7)s}Notice\" % NAMESPACES:\n result['notice'] = e.text\n if \"ERROR #5001\" in 
result['notice']:\n match_data = re.search(\n r'MRN[:=] ?([0-9]+)\\b', result['notice'])\n if match_data:\n result['mrn'] = match_data.group(1)\n if e.tag == \"{%(hl7)s}TERMSACCEPTED\" % NAMESPACES:\n result['terms_accepted'] = e.text\n if e.tag == \"{%(enrollment)s}TermsString\" % NAMESPACES:\n # the content of TermsString is html\n e.tag = 'TermsString' # get rid of namespaces\n terms_string = ''.join(\n [etree.tounicode(ch, method='xml') for ch in e])\n result['terms_string'] = terms_string\n if e.tag == \"{%(hl7)s}StageUserPassword\" % NAMESPACES:\n result['stageuser_password'] = e.text\n if e.tag == \"{%(hl7)s}StageUserToken\" % NAMESPACES:\n result['stageuser_token'] = e.text\n\n return result\n\n\ndef activate_staged_user(access_token, hie_profile, user_profile):\n \"\"\"try to activate the member with HIXNY;\n if successful, returns MRN\n \"\"\"\n activate_xml = \"\"\"\n \n %s\n %s\n %s\n %d\n \n \"\"\" % (\n user_profile.birthdate_intersystems,\n hie_profile.stageuser_token,\n hie_profile.stageuser_password,\n hie_profile.consent_to_share_data,\n )\n # print(activate_xml)\n\n response = requests.post(\n settings.HIE_ACTIVATESTAGEDUSER_API_URI,\n cert=(\n write_key_to_filepath(\n settings.HIE_CLIENT_CERT_FILEPATH, settings.HIE_CLIENT_CERT\n ),\n write_key_to_filepath(\n settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH,\n settings.HIE_CLIENT_PRIVATE_KEY,\n ),\n ),\n verify=False,\n headers={\n 'Content-Type': 'application/xml',\n 'Authorization': \"Bearer %s\" % (access_token),\n },\n data=activate_xml,\n )\n\n response_content = response.content.decode('utf-8')\n response_xml = etree.XML(response.content)\n\n result = {\"response_body\": etree.tounicode(\n response_xml, pretty_print=True)}\n # print(result['response_body'])\n\n mrn_elements = response_xml.xpath(\n \"//hl7:ActivatedUserMrn\", namespaces=NAMESPACES)\n mrn_match = re.search(r\"ActivatedUserMrn>(\\d+)<\", response_content)\n\n if len(mrn_elements) > 0:\n mrn_element = mrn_elements[0]\n # print('mrn_element =', etree.tounicode(mrn_element))\n result.update(\n status='success',\n mrn=etree.tounicode(mrn_element, method='text',\n with_tail=False).strip(),\n )\n elif mrn_match is not None:\n # print('mrn_match =', mrn_match)\n result.update(status='success', mrn=mrn_match.group(1))\n else:\n result.update(\n status='failure', mrn=None, error='Could not activate staged user.'\n )\n\n return result\n\n\ndef consumer_directive(access_token, hie_profile, user_profile):\n \"\"\"post to the consumer directive API to determine the member's consumer directive;\n returns data containing the status and any notice.\n \"\"\"\n if not hie_profile.consent_to_share_data:\n result = {\n 'status': 'ERROR',\n 'notice': 'Member has not consented to share data, cannot submit consumer directive.',\n }\n elif not hie_profile.mrn:\n result = {\n 'status': 'ERROR',\n 'notice': 'Member MRN not set, cannot submit consumer directive.',\n }\n else:\n consumer_directive_xml = \"\"\"\n \n %s\n %s\n %s\n %d\n \n \"\"\" % (\n hie_profile.mrn,\n user_profile.birthdate_intersystems,\n hie_profile.data_requestor,\n hie_profile.consent_to_share_data,\n )\n # print(consumer_directive_xml)\n\n response = requests.post(\n settings.HIE_CONSUMERDIRECTIVE_API_URI,\n cert=(\n write_key_to_filepath(\n settings.HIE_CLIENT_CERT_FILEPATH, settings.HIE_CLIENT_CERT\n ),\n write_key_to_filepath(\n settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH,\n settings.HIE_CLIENT_PRIVATE_KEY,\n ),\n ),\n verify=False,\n headers={\n 'Content-Type': 'application/xml',\n 'Authorization': \"Bearer %s\" 
% (access_token),\n },\n data=consumer_directive_xml,\n )\n response_xml = etree.XML(response.content)\n result = {\"response_body\": etree.tounicode(\n response_xml, pretty_print=True)}\n # print(result['response_body'])\n\n result.update(\n status=''.join(\n response_xml.xpath(\"hl7:Status/text()\", namespaces=NAMESPACES)\n ),\n notice=''.join(\n response_xml.xpath(\"hl7:Notice/text()\", namespaces=NAMESPACES)\n ),\n )\n if result['status'] == 'ERROR':\n result['error'] = result['notice']\n\n return result\n\n\ndef get_clinical_document(access_token, hie_profile):\n \"\"\"get member's clinical data from HIXNY (CDA XML), convert to FHIR (JSON), return both.\n \"\"\"\n request_xml = \"\"\"\n \n %s\n %s\n \n \"\"\" % (\n hie_profile.mrn,\n hie_profile.data_requestor,\n )\n # print(request_xml)\n\n response = requests.post(\n settings.HIE_GETDOCUMENT_API_URI,\n cert=(\n write_key_to_filepath(\n settings.HIE_CLIENT_CERT_FILEPATH, settings.HIE_CLIENT_CERT\n ),\n write_key_to_filepath(\n settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH,\n settings.HIE_CLIENT_PRIVATE_KEY,\n ),\n ),\n verify=False,\n headers={\n 'Content-Type': 'application/xml',\n 'Authorization': \"Bearer %s\" % (access_token),\n },\n data=request_xml,\n )\n response_xml = etree.XML(response.content)\n\n result = {\"response_body\": etree.tounicode(\n response_xml, pretty_print=True)}\n\n cda_element = response_xml.find(\"{%(hl7)s}ClinicalDocument\" % NAMESPACES)\n if cda_element is not None:\n cda_content = etree.tounicode(cda_element)\n fhir_content = cda2fhir(cda_content).decode('utf-8')\n result.update(cda_content=cda_content, fhir_content=fhir_content)\n else:\n result.update(cda_content='', fhir_content='')\n\n return result\n\n\ndef cda2fhir(cda_content):\n \"\"\"use the CDA2FHIR service to convert CDA XML to FHIR JSON\"\"\"\n response = requests.post(\n settings.CDA2FHIR_SERVICE_URL,\n data=cda_content,\n headers={'Content-Type': 'application/xml'},\n )\n fhir_content = response.content\n return fhir_content\n", "sub_path": "apps/hie/hixny_requests.py", "file_name": "hixny_requests.py", "file_ext": "py", "file_size_in_byte": 16376, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "models.HIEProfile.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.HIEProfile.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.HIEProfile", "line_number": 45, "usage_type": "name"}, {"api_name": "accounts.models.UserProfile.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "accounts.models.UserProfile.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "accounts.models.UserProfile", "line_number": 47, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 122, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_WORKBENCH_USERNAME", "line_number": 135, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 135, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_WORKBENCH_PASSWORD", "line_number": 136, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 136, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 139, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_TOKEN_API_URI", "line_number": 140, 
"usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 140, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT_FILEPATH", "line_number": 143, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 143, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT", "line_number": 143, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH", "line_number": 146, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 146, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY", "line_number": 147, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 147, "usage_type": "name"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 152, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_BASIC_AUTH_USERNAME", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 152, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_BASIC_AUTH_PASSWORD", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 153, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 159, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 162, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 162, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_TOKEN_API_URI", "line_number": 166, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 166, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_BASIC_AUTH_PASSWORD", "line_number": 167, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 167, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_WORKBENCH_USERNAME", "line_number": 181, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 181, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 215, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_PHRREGISTER_API_URI", "line_number": 216, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 216, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT_FILEPATH", "line_number": 219, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 219, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT", "line_number": 219, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH", "line_number": 222, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 222, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY", "line_number": 223, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 223, "usage_type": "name"}, {"api_name": "lxml.etree.XML", "line_number": 234, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 234, "usage_type": "name"}, {"api_name": "lxml.etree.tounicode", "line_number": 235, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 235, "usage_type": "name"}, {"api_name": "re.search", "line_number": 248, "usage_type": "call"}, {"api_name": "lxml.etree.tounicode", "line_number": 258, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 258, "usage_type": "name"}, {"api_name": 
"requests.post", "line_number": 287, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_ACTIVATESTAGEDUSER_API_URI", "line_number": 288, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 288, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT_FILEPATH", "line_number": 291, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 291, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT", "line_number": 291, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH", "line_number": 294, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 294, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY", "line_number": 295, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 295, "usage_type": "name"}, {"api_name": "lxml.etree.XML", "line_number": 307, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 307, "usage_type": "name"}, {"api_name": "lxml.etree.tounicode", "line_number": 309, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 309, "usage_type": "name"}, {"api_name": "re.search", "line_number": 315, "usage_type": "call"}, {"api_name": "lxml.etree.tounicode", "line_number": 322, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 322, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 366, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_CONSUMERDIRECTIVE_API_URI", "line_number": 367, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 367, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT_FILEPATH", "line_number": 370, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 370, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT", "line_number": 370, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH", "line_number": 373, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 373, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY", "line_number": 374, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 374, "usage_type": "name"}, {"api_name": "lxml.etree.XML", "line_number": 384, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 384, "usage_type": "name"}, {"api_name": "lxml.etree.tounicode", "line_number": 385, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 385, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 417, "usage_type": "call"}, {"api_name": "django.conf.settings.HIE_GETDOCUMENT_API_URI", "line_number": 418, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 418, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT_FILEPATH", "line_number": 421, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 421, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_CERT", "line_number": 421, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY_FILEPATH", "line_number": 424, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 424, "usage_type": "name"}, {"api_name": "django.conf.settings.HIE_CLIENT_PRIVATE_KEY", "line_number": 425, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 425, "usage_type": "name"}, {"api_name": "lxml.etree.XML", "line_number": 435, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 435, "usage_type": "name"}, {"api_name": "lxml.etree.tounicode", "line_number": 437, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 437, "usage_type": "name"}, {"api_name": "lxml.etree.tounicode", "line_number": 442, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 442, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 453, "usage_type": "call"}, {"api_name": "django.conf.settings.CDA2FHIR_SERVICE_URL", "line_number": 454, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 454, "usage_type": "name"}]} +{"seq_id": "13563342", "text": "\"\"\"\n@作者: egon老湿\n@微信:18611453110\n@专栏: https://zhuanlan.zhihu.com/c_1189883314197168128\n\"\"\"\n# 服务端应该满足两个特点:\n# 1、一直对外提供服务\n# 2、并发地服务多个客户端\nimport subprocess\nimport struct\nimport json\nfrom socket import *\n\nserver=socket(AF_INET,SOCK_STREAM)\nserver.setsockopt(SOL_SOCKET,SO_REUSEADDR,1) #就是它,在bind前加\nserver.bind(('127.0.0.1',8083))\nserver.listen(5)\n\n# 服务端应该做两件事\n# 第一件事:循环地从板连接池中取出链接请求与其建立双向链接,拿到链接对象\nwhile True:\n conn,client_addr=server.accept()\n\n # 第二件事:拿到链接对象,与其进行通信循环\n while True:\n try:\n cmd=conn.recv(1024)\n if len(cmd) == 0:break\n obj=subprocess.Popen(cmd.decode('utf-8'),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n stdout_res=obj.stdout.read()\n stderr_res=obj.stderr.read()\n total_size=len(stdout_res)+len(stderr_res)\n\n # 1、制作头\n header_dic={\n \"filename\":\"a.txt\",\n \"total_size\":total_size,\n \"md5\":\"123123xi12ix12\"\n }\n\n json_str = json.dumps(header_dic)\n json_str_bytes = json_str.encode('utf-8')\n\n\n # 2、先把头的长度发过去\n x=struct.pack('i',len(json_str_bytes))\n conn.send(x)\n\n # 3、发头信息\n conn.send(json_str_bytes)\n # 4、再发真实的数据\n conn.send(stdout_res)\n conn.send(stderr_res)\n\n except Exception:\n break\n conn.close()\n\n\n\n\n", "sub_path": "day37/04 解决粘包问题(终极版)/服务端.py", "file_name": "服务端.py", "file_ext": "py", "file_size_in_byte": 1810, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "subprocess.Popen", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "154150790", "text": "import dataclasses\n\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n\nRELEASE_PHASE_ALPHA = \"alpha\"\nRELEASE_PHASE_BETA = \"beta\"\nRELEASE_PHASE_RC = \"rc\"\nRELEASE_PHASE_PREVIEW = \"preview\"\nRELEASE_PHASE_POST = \"post\"\nRELEASE_PHASE_REV = \"rev\"\nRELEASE_PHASE_DEV = \"dev\"\nRELEASE_PHASES = {\n RELEASE_PHASE_ALPHA: \"a\",\n RELEASE_PHASE_BETA: \"b\",\n RELEASE_PHASE_RC: \"c\",\n RELEASE_PHASE_PREVIEW: \"pre\",\n RELEASE_PHASE_POST: \"-\", # shorthand of 1.2.3-post1 is 1.2.3-1\n RELEASE_PHASE_REV: \"r\",\n RELEASE_PHASE_DEV: \"dev\",\n}\nRELEASE_PHASES_SHORT = {v: k for k, v in RELEASE_PHASES.items() if k != \"post\"}\n\n\n@dataclasses.dataclass(frozen=True, eq=True, order=True)\nclass Release:\n major: int = dataclasses.field(default=0, compare=False)\n minor: Optional[int] = dataclasses.field(default=None, compare=False)\n patch: Optional[int] = 
dataclasses.field(default=None, compare=False)\n # some projects use non-semver versioning schemes, eg: 1.2.3.4\n extra: Optional[Union[int, Tuple[int, ...]]] = dataclasses.field(\n default=None, compare=False\n )\n precision: int = dataclasses.field(default=None, init=False, compare=False)\n text: str = dataclasses.field(default=None, init=False, compare=False)\n _compare_key: Tuple[int, ...] = dataclasses.field(\n default=None, init=False, compare=True\n )\n\n def __post_init__(self) -> None:\n if self.extra is None:\n object.__setattr__(self, \"extra\", ())\n elif not isinstance(self.extra, tuple):\n object.__setattr__(self, \"extra\", (self.extra,))\n\n parts = list(\n map(\n str,\n filter(\n lambda x: x is not None,\n [self.major, self.minor, self.patch, *self.extra],\n ),\n )\n )\n object.__setattr__(self, \"text\", \".\".join(parts))\n object.__setattr__(self, \"precision\", len(parts))\n object.__setattr__(\n self,\n \"_compare_key\",\n (self.major, self.minor or 0, self.patch or 0, *self.extra),\n )\n\n @classmethod\n def from_parts(cls, *parts: int) -> \"Release\":\n if not parts:\n return cls()\n\n return cls(\n major=parts[0],\n minor=parts[1] if len(parts) > 1 else None,\n patch=parts[2] if len(parts) > 2 else None,\n extra=parts[3:] if len(parts) > 3 else (),\n )\n\n def to_string(self) -> str:\n return self.text\n\n def next_major(self) -> \"Release\":\n return dataclasses.replace(\n self,\n major=self.major + 1,\n minor=0 if self.minor is not None else None,\n patch=0 if self.patch is not None else None,\n extra=tuple(0 for _ in self.extra),\n )\n\n def next_minor(self) -> \"Release\":\n return dataclasses.replace(\n self,\n major=self.major,\n minor=self.minor + 1 if self.minor is not None else 1,\n patch=0 if self.patch is not None else None,\n extra=tuple(0 for _ in self.extra),\n )\n\n def next_patch(self) -> \"Release\":\n return dataclasses.replace(\n self,\n major=self.major,\n minor=self.minor if self.minor is not None else 0,\n patch=self.patch + 1 if self.patch is not None else 1,\n extra=tuple(0 for _ in self.extra),\n )\n\n\n@dataclasses.dataclass(frozen=True, eq=True, order=True)\nclass ReleaseTag:\n phase: str\n number: int = dataclasses.field(default=0)\n\n def __post_init__(self) -> None:\n object.__setattr__(self, \"phase\", self.expand(self.phase))\n\n @classmethod\n def shorten(cls, phase: str) -> str:\n return RELEASE_PHASES.get(phase, phase)\n\n @classmethod\n def expand(cls, phase: str) -> str:\n return RELEASE_PHASES_SHORT.get(phase, phase)\n\n def to_string(self, short: bool = False) -> str:\n if short:\n return f\"{self.shorten(self.phase)}{self.number}\"\n return f\"{self.phase}.{self.number}\"\n\n def next(self) -> \"ReleaseTag\":\n return dataclasses.replace(self, phase=self.phase, number=self.number + 1)\n\n def next_phase(self) -> Optional[\"ReleaseTag\"]:\n if self.phase in [\n RELEASE_PHASE_POST,\n RELEASE_PHASE_RC,\n RELEASE_PHASE_REV,\n RELEASE_PHASE_DEV,\n ]:\n return None\n\n if self.phase == RELEASE_PHASE_ALPHA:\n _phase = RELEASE_PHASE_BETA\n elif self.phase == RELEASE_PHASE_BETA:\n _phase = RELEASE_PHASE_RC\n else:\n return None\n\n return self.__class__(phase=_phase, number=0)\n\n\nLocalSegmentType = Optional[Union[str, int, Tuple[Union[str, int], ...]]]\n", "sub_path": "src/poetry/core/version/pep440/segments.py", "file_name": "segments.py", "file_ext": "py", "file_size_in_byte": 4773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "dataclasses.field", 
"line_number": 29, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 33, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 33, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 36, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 37, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 38, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 38, "usage_type": "call"}, {"api_name": "dataclasses.replace", "line_number": 81, "usage_type": "call"}, {"api_name": "dataclasses.replace", "line_number": 90, "usage_type": "call"}, {"api_name": "dataclasses.replace", "line_number": 99, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 27, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 111, "usage_type": "call"}, {"api_name": "dataclasses.replace", "line_number": 130, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 132, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 108, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "539105814", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import f_regression\n\ndef adj_r2(x,y): # ? A function that returns the adjusted r_squared based on two data variables;\n r2 = reg.score(x,y)\n n = x.shape[0]\n p = x.shape[1]\n adjusted_r2 = 1-(1-r2)*(n-1)/(n-p-1)\n return adjusted_r2\n\ndata = pd.read_csv('real_estate_price_size_year_2.csv')\n\nx = data[['size','year']]\ny = data['price']\n\nreg = LinearRegression()\nreg.fit(x,y)\n\ninterc = reg.intercept_ # ? Interception value for the multiple regression\ncoefic = reg.coef_ # ? Coefficient of each variable\nr_sqrd = reg.score(x,y) # ? R-Squared\n\nadj_r_sqrd = adj_r2(x,y)\n\n# ? Now I'll be performing multiple simple linear regressions in order to check variables usefulness\n\np_values = f_regression(x,y)[1]\np_values = p_values.round(3) #? array([0. , 0.357]) the second variable \"year\" is not important at all.\n\nreg_summary = pd.DataFrame(data = x.columns.values,columns=['Features'])\nreg_summary ['Coefficients'] = reg.coef_\nreg_summary ['P-values'] = p_values\n\n\"\"\"\n Features Coefficients P-values\n0 size 227.700854 0.000\n1 year 2916.785327 0.357\n\n\"\"\"\n# ! 
Study conclusion: year isn't relevant and it should be removed from the model;\n", "sub_path": "scikit_basic/multiple_linear_regression_exercise.py", "file_name": "multiple_linear_regression_exercise.py", "file_ext": "py", "file_size_in_byte": 1352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "seaborn.set", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.f_regression", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "76747445", "text": "import pandas as pd\nimport numpy as np\nimport json\nfrom statsmodels.tsa.api import VAR\nimport time\nimport sys\nimport os\nroot_path = os.path.abspath(__file__)\nroot_path = '/'.join(root_path.split('/')[:-2])\nsys.path.append(root_path)\nfrom libcity.utils import StandardScaler\nfrom libcity.model.loss import masked_mae_np, masked_mape_np, masked_mse_np, masked_rmse_np, r2_score_np, explained_variance_score_np\n\nconfig = {\n 'dataset': 'METR_LA',\n 'train_rate': 0.7,\n 'eval_rate': 0.1,\n 'input_window': 12,\n 'output_windows': 3,\n 'maxlags': 1,\n 'metrics': ['masked_MAE', 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'MAE', 'MSE', 'RMSE', 'MAPE', 'R2', 'EVAR']\n}\n\n\ndef preprocess_data(data):\n train_rate = config.get('train_rate', 0.7)\n eval_rate = config.get('eval_rate', 0.1)\n\n input_window = config.get('input_window', 12)\n output_window = config.get('output_window', 3)\n\n x, y = [], []\n for i in range(len(data) - input_window - output_window):\n a = data[i: i + input_window + output_window]\n x.append(a[0: input_window])\n y.append(a[input_window: input_window + output_window])\n x = np.array(x)\n y = np.array(y)\n\n train_size = int(x.shape[0] * (train_rate + eval_rate))\n trainX = x[:train_size]\n trainY = y[:train_size]\n testX = x[train_size:x.shape[0]]\n testY = y[train_size:x.shape[0]]\n return trainX, trainY, testX, testY\n\n\ndef get_data(dataset):\n # path\n path = 'raw_data/' + dataset + '/'\n config_path = path + 'config.json'\n dyna_path = path + dataset + '.dyna'\n geo_path = path + dataset + '.geo'\n\n # read config\n with open(config_path, 'r') as f:\n json_obj = json.load(f)\n for key in json_obj:\n if key not in config:\n config[key] = json_obj[key]\n\n # read geo\n geo_file = pd.read_csv(geo_path)\n geo_ids = list(geo_file['geo_id'])\n\n # read dyna\n dyna_file = pd.read_csv(dyna_path)\n data_col = config.get('data_col', '')\n if data_col != '': # 根据指定的列加载数据集\n if isinstance(data_col, list):\n data_col = data_col.copy()\n else: # str\n data_col = [data_col].copy()\n data_col.insert(0, 'time')\n data_col.insert(1, 'entity_id')\n dyna_file = dyna_file[data_col]\n else: # 不指定则加载所有列\n dyna_file = dyna_file[dyna_file.columns[2:]] # 从time列开始所有列\n\n # 求时间序列\n time_slots = list(dyna_file['time'][:int(dyna_file.shape[0] / len(geo_ids))])\n\n # 转3-d数组\n feature_dim = len(dyna_file.columns) - 2\n df = dyna_file[dyna_file.columns[-feature_dim:]]\n len_time = len(time_slots)\n data = []\n for i in range(0, df.shape[0], len_time):\n data.append(df[i:i + len_time].values)\n data = np.array(data, dtype=float) # (num_nodes, len_time, feature_dim)\n data = data.swapaxes(0, 1) # (len_time, num_nodes, feature_dim)\n return data\n\n\ndef run_VAR(config, data, testX, testY):\n 
print(\"----begin training----\")\n ts, points = data.shape[:2]\n data = data.reshape(ts, -1)[:int(ts * 0.7)] + np.random.randn(int(ts * 0.7), points) / 10000\n scaler = StandardScaler(data.mean(), data.std())\n data = scaler.transform(data)\n\n s = time.time()\n model = VAR(data)\n maxlags = config.get(\"maxlag\", 1)\n results = model.fit(maxlags=maxlags, ic='aic')\n e = time.time()\n print(1, e - s)\n\n input_window = config.get('input_window', 12)\n output_window = config.get('output_window', 3)\n testX = np.array(testX)\n testY = np.array(testY)\n testX = testX[:len(testX) // points * points].reshape(-1, input_window, points)\n testY = testY[:len(testY) // points * points].reshape(-1, output_window, points)\n print(testX.shape, testY.shape) # B, T, N * F\n\n s = time.time()\n y_pred, y_true = [[] for i in range(output_window)], [[] for i in range(output_window)]\n for sample, target in zip(testX, testY):\n # print(sample.shape, target.shape) T, N * F\n sample = scaler.transform(sample[-maxlags:])\n out = results.forecast(sample, output_window)\n # print(out.shape) T, N * F\n out = scaler.inverse_transform(out)\n for i in range(output_window):\n y_pred[i].append(out[i])\n y_true[i].append(target[i])\n e = time.time()\n print(2, e - s)\n y_pred = np.array(y_pred) # T, B, N, F\n y_true = np.array(y_true)\n print(\"----end training-----\")\n return y_pred, y_true\n\n\ndef evaluate(result, testy):\n metrics = config.get('metrics',\n ['MAE', 'MAPE', 'MSE', 'RMSE', 'masked_MAE', 'masked_MAPE', 'masked_MSE', 'masked_RMSE', 'R2', 'EVAR'])\n df = []\n line = {}\n for metric in metrics:\n if metric == 'masked_MAE':\n line[metric] = masked_mae_np(result, testy, 0)\n elif metric == 'masked_MSE':\n line[metric] = masked_mse_np(result, testy, 0)\n elif metric == 'masked_RMSE':\n line[metric] = masked_rmse_np(result, testy, 0)\n elif metric == 'masked_MAPE':\n line[metric] = masked_mape_np(result, testy, 0)\n elif metric == 'MAE':\n line[metric] = masked_mae_np(result, testy)\n elif metric == 'MSE':\n line[metric] = masked_mse_np(result, testy)\n elif metric == 'RMSE':\n line[metric] = masked_rmse_np(result, testy)\n elif metric == 'MAPE':\n line[metric] = masked_mape_np(result, testy)\n elif metric == 'R2':\n line[metric] = r2_score_np(result, testy)\n elif metric == 'EVAR':\n line[metric] = explained_variance_score_np(result, testy)\n else:\n raise ValueError(\n 'Error parameter evaluator_mode={}.'.format(metric))\n df.append(line)\n\n df = pd.DataFrame(df, columns=metrics)\n print(df)\n df.to_csv(\"result.csv\")\n\n\ndef main():\n data = get_data(config.get('dataset', ''))\n trainX, trainY, testX, testY = preprocess_data(data)\n y_pred, y_true = run_VAR(config, data, testX, testY)\n evaluate(y_pred, y_true)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "test/test_VAR.py", "file_name": "test_VAR.py", "file_ext": "py", "file_size_in_byte": 6114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 
63, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "libcity.utils.StandardScaler", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "statsmodels.tsa.api.VAR", "line_number": 103, "usage_type": "call"}, {"api_name": "time.time", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mae_np", "line_number": 143, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mse_np", "line_number": 145, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_rmse_np", "line_number": 147, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mape_np", "line_number": 149, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mae_np", "line_number": 151, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mse_np", "line_number": 153, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_rmse_np", "line_number": 155, "usage_type": "call"}, {"api_name": "libcity.model.loss.masked_mape_np", "line_number": 157, "usage_type": "call"}, {"api_name": "libcity.model.loss.r2_score_np", "line_number": 159, "usage_type": "call"}, {"api_name": "libcity.model.loss.explained_variance_score_np", "line_number": 161, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "188210867", "text": "#-\n# Copyright (c) 2016 Alfredo Mazzinghi\n# All rights reserved.\n#\n# This software was developed by SRI International and the University of\n# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237\n# (\"CTSRD\"), as part of the DARPA CRASH research programme.\n#\n# @BERI_LICENSE_HEADER_START@\n#\n# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor\n# license agreements. See the NOTICE file distributed with this work for\n# additional information regarding copyright ownership. BERI licenses this\n# file to you under the BERI Hardware-Software License, Version 1.0 (the\n# \"License\"); you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at:\n#\n# http://www.beri-open-systems.org/legal/license-1-0.txt\n#\n# Unless required by applicable law or agreed to in writing, Work distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the License.\n#\n# @BERI_LICENSE_HEADER_END@\n#\n\n\"\"\"\nThis script produces a poiner provenance plot from a cheri trace file.\n\"\"\"\n\nimport argparse as ap\nimport sys\nimport logging\nimport cProfile\nimport pstats\n\nfrom cheriplot.core.tool import PlotTool\nfrom cheriplot.plot.provenance import (\n ProvenanceTreePlot, AddressMapCapCreatePlot, AddressMapCapDerefPlot,\n PointedAddressFrequencyPlot, SyscallAddressMapPlot)\n\nlogger = logging.getLogger(__name__)\n\nclass ProvenancePlotTool(PlotTool):\n\n description = \"Plot pointer provenance from cheri trace\"\n\n def init_arguments(self):\n super().init_arguments()\n\n self.parser.add_argument(\"-m\", \"--vmmap-file\",\n help=\"CSV file containing the VM map dump\"\n \" generated by procstat\")\n\n sub = self.parser.add_subparsers(title=\"plot\", help=\"plot-type --help\")\n tree = sub.add_parser(\"tree\",\n help=\"Draw the part of the provenance tree \"\n \"that contains a given capability\")\n tree.add_argument(\"cycle\", type=int,\n help=\"cycle number of the capability to find\")\n tree.set_defaults(handler=self._tree)\n\n asmap_bounds = sub.add_parser(\"asmap-bounds\",\n help=\"Draw address-map plot with \"\n \"capability bounds setting operations\")\n asmap_bounds.set_defaults(handler=self._asmap_bounds)\n asmap_deref = sub.add_parser(\"asmap-deref\",\n help=\"Draw address-map plot with \"\n \"capability dereferences\")\n asmap_deref.set_defaults(handler=self._asmap_deref)\n asmap_syscall = sub.add_parser(\"asmap-syscall\",\n help=\"Draw address-map plot with \"\n \" capabilities returned by system calls\")\n asmap_syscall.set_defaults(handler=self._asmap_syscall)\n pfreq = sub.add_parser(\"pfreq\", help=\"Draw frequency of reference plot\")\n pfreq.set_defaults(handler=self._pfreq)\n\n def _tree(self, args):\n plot = ProvenanceTreePlot(args.tree, args.trace, args.cache)\n plot.show()\n\n def _asmap_bounds(self, args):\n plot = AddressMapCapCreatePlot(args.trace, args.cache)\n if args.vmmap_file:\n plot.set_vmmap(args.vmmap_file)\n plot.show()\n\n def _asmap_deref(self, args):\n plot = AddressMapCapDerefPlot(args.trace, args.cache)\n if args.vmmap_file:\n plot.set_vmmap(args.vmmap_file)\n plot.show()\n\n def _asmap_syscall(self, args):\n plot = SyscallAddressMapPlot(args.trace, args.cache)\n if args.vmmap_file:\n plot.set_vmmap(args.vmmap_file)\n plot.show()\n\n def _pfreq(self, args):\n plot = PointedAddressFrequencyPlot(args.trace, args.cache)\n if args.vmmap_file:\n plot.set_vmmap(args.vmmap_file)\n plot.show()\n\n def _run(self, args):\n if args.outfile:\n plot.plot_file = args.outfile\n args.handler(args)\n\n\ndef main():\n tool = ProvenancePlotTool()\n tool.run()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "tools/pointer_provenance.py", "file_name": "pointer_provenance.py", "file_ext": "py", "file_size_in_byte": 4339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "cheriplot.core.tool.PlotTool", "line_number": 45, "usage_type": "name"}, {"api_name": "cheriplot.plot.provenance.ProvenanceTreePlot", "line_number": 80, "usage_type": "call"}, {"api_name": "cheriplot.plot.provenance.AddressMapCapCreatePlot", "line_number": 84, "usage_type": "call"}, {"api_name": "cheriplot.plot.provenance.AddressMapCapDerefPlot", "line_number": 90, "usage_type": "call"}, {"api_name": 
"cheriplot.plot.provenance.SyscallAddressMapPlot", "line_number": 96, "usage_type": "call"}, {"api_name": "cheriplot.plot.provenance.PointedAddressFrequencyPlot", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "188152101", "text": "from __future__ import absolute_import, print_function, division\n\nimport numpy as np\nimport argparse\nimport time\nimport gym\n\nfrom gym_brt.envs import QubeBeginUprightEnv, QubeBeginDownEnv\n\ntry:\n import tensorflow as tf\nexcept Exception as e:\n raise ImportError(\"Please install TensorFlow.\")\ntry:\n from baselines.common.vec_env.dummy_vec_env import DummyVecEnv\n from baselines.ppo2.ppo2 import learn as learn_ppo2\n from baselines import logger\nexcept:\n raise ImportError(\n \"Please install OpenAI baselines from: https://github.com/openai/baselines.\"\n )\n\n\ndef main(args):\n if args.network == \"mlp\":\n network_kwargs = {\n \"num_layers\": 2,\n \"num_hidden\": 64,\n \"activation\": tf.tanh,\n \"layer_norm\": False,\n }\n elif args.network == \"lstm\":\n network_kwargs = {\"nlstm\": 128, \"layer_norm\": False}\n else:\n raise ValueError(\"{} is not a valid network type.\".format(args.network))\n\n print(\"Using a {} network\".format(args.network))\n\n try:\n logger.configure(dir=args.checkpoint_dir)\n\n if args.env == \"up\":\n qube_env = QubeBeginUprightEnv\n elif args.env == \"down\":\n qube_env = QubeBeginDownEnv\n else:\n raise ValueError\n\n env = lambda *a, **k: qube_env(frequency=args.frequency)\n env = DummyVecEnv([env])\n\n model = learn_ppo2(\n network=args.network,\n env=env,\n total_timesteps=int(float(args.num_steps)),\n nsteps=2048,\n ent_coef=0.0,\n lr=lambda f: 3e-4 * f,\n vf_coef=0.5,\n max_grad_norm=0.5,\n gamma=0.99,\n lam=0.95,\n log_interval=1,\n nminibatches=32,\n noptepochs=10,\n cliprange=0.2,\n save_interval=int(\n np.ceil(args.save_interval / 2048)\n ), # Gives nicer nubers than x//2048...\n load_path=args.load_path,\n **network_kwargs\n )\n\n if args.save_path is not None:\n print(\"Saving model at {}\".format(args.save_path))\n model.save(args.save_path)\n\n if args.play:\n print(\"Running trained model\")\n e = env.envs[0]\n obs = e.reset()\n while True:\n actions, _, state, _ = model.step(obs)\n obs, r, done, _ = e.step(actions[0])\n done = done.any() if isinstance(done, np.ndarray) else done\n if done:\n obs = e.reset()\n e.hard_reset()\n\n finally:\n for e in env.envs:\n e.close()\n\n\nif __name__ == \"__main__\":\n\n # Parse command line args\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--env\",\n \"-e\",\n default=\"up\",\n type=str,\n choices=[\"up\", \"down\"],\n help=\"Enviroment to run.\",\n )\n parser.add_argument(\n \"--network\",\n \"-nn\",\n default=\"mlp\",\n type=str,\n choices=[\"mlp\", \"lstm\"],\n help=\"Type of neural network to use.\",\n )\n parser.add_argument(\n \"--num_steps\", \"-n\", default=0, help=\"Total number of steps to run.\"\n )\n parser.add_argument(\n \"--frequency\",\n \"-f\",\n default=\"250\",\n type=float,\n help=\"The frequency of samples on the Quanser hardware.\",\n )\n parser.add_argument(\n \"--save_interval\",\n \"-si\",\n default=\"10000\",\n type=float,\n help=\"How often to save the model (rounded up to nearest multiple of 2048).\",\n )\n parser.add_argument(\n \"--play\", \"-p\", action=\"store_true\", help=\"Run the trained network\"\n )\n parser.add_argument(\"--save_path\", \"-s\", type=str)\n parser.add_argument(\"--load_path\", \"-l\", type=str)\n parser.add_argument(\"--checkpoint_dir\", \"-c\", type=str)\n args, _ = 
parser.parse_known_args()\n\n main(args)\n", "sub_path": "tests/qube_ppo.py", "file_name": "qube_ppo.py", "file_ext": "py", "file_size_in_byte": 3931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.tanh", "line_number": 29, "usage_type": "attribute"}, {"api_name": "baselines.logger.configure", "line_number": 40, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "gym_brt.envs.QubeBeginUprightEnv", "line_number": 43, "usage_type": "name"}, {"api_name": "gym_brt.envs.QubeBeginDownEnv", "line_number": 45, "usage_type": "name"}, {"api_name": "baselines.common.vec_env.dummy_vec_env.DummyVecEnv", "line_number": 50, "usage_type": "call"}, {"api_name": "baselines.ppo2.ppo2.learn", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 85, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "632568958", "text": "import random\nimport sys\n\nsys.path.append(\"\")\nsys.path.append(\"../..\")\nfrom torch import no_grad, nn\nfrom torch.optim.lr_scheduler import ExponentialLR\nfrom tqdm import tqdm\n\nimport os\n\nimport pandas as pd\nimport torch\n\nfrom Marie.Util.Models.TransR import TransR\nfrom Marie.Util.CommonTools.FileLoader import FileLoader\nfrom Marie.Util.Dataset.TransR_Inference_Dataset import TransRInferenceDataset\nfrom Marie.Util.location import DATA_DIR\nfrom Marie.Util.NHopExtractor import HopExtractor\n\n\ndef hit_rate(true_tail_idx_ranking_list):\n hit_1 = 0\n hit_5 = 0\n hit_10 = 0\n counter = 0\n for true_tail_idx_ranking in true_tail_idx_ranking_list:\n if true_tail_idx_ranking == 0:\n hit_1 += 1\n hit_5 += 1\n hit_10 += 1\n elif 0 < true_tail_idx_ranking <= 4:\n hit_5 += 1\n hit_10 += 1\n elif 4 < true_tail_idx_ranking <= 9:\n hit_10 += 1\n counter += 1\n hit_1 = hit_1 / counter\n hit_5 = hit_5 / counter\n hit_10 = hit_10 / counter\n return hit_1, hit_5, hit_10\n\n\ndef evaluate_ranking(distances, all_tails, true_tail):\n _, B = torch.topk(distances, k=len(distances), largest=False)\n B = B.tolist()\n selected_candidates = [all_tails.tolist()[idx] for idx in B]\n if true_tail in selected_candidates:\n f_ranking_idx = selected_candidates.index(true_tail)\n f_ranking = 1 / (f_ranking_idx + 1)\n else:\n f_ranking = 0\n f_ranking_idx = -1\n return f_ranking, f_ranking_idx\n\n\n# This class is used to train TransR embedding with Inference feature\nclass TransRATrainer:\n def __init__(self, full_dataset_dir, ontology, batch_size=32, epoch_num=100, dim=20, learning_rate=1.0, gamma=1,\n test=False, use_projection=False, alpha=0.1, margin=5, resume=False, inference=True, global_neg=False,\n gpu_number=1):\n self.full_dataset_dir = full_dataset_dir\n self.ontology = ontology\n self.learning_rate = learning_rate\n self.gamma = gamma\n self.dim = dim\n self.gpu_number = gpu_number\n self.batch_size = batch_size\n self.test = test\n self.use_projection = use_projection\n self.alpha = alpha\n self.margin = margin\n self.resume = resume\n self.global_neg = global_neg\n self.inference = inference\n self.my_extractor = HopExtractor(\n dataset_dir=self.full_dataset_dir,\n dataset_name=self.ontology)\n\n if os.path.exists(os.path.join(full_dir, f\"{self.ontology}-train-2.txt\")):\n df_train = pd.read_csv(os.path.join(full_dir, f\"{self.ontology}-train-2.txt\"), sep=\"\\t\", 
header=None)\n else:\n df_train = pd.read_csv(os.path.join(full_dir, f\"{self.ontology}-train.txt\"), sep=\"\\t\", header=None)\n self.df_train = df_train\n if len(df_train) < 500:\n df_train_small = df_train\n else:\n df_train_small = df_train.sample(frac=0.01)\n self.file_loader = FileLoader(full_dataset_dir=self.full_dataset_dir, dataset_name=self.ontology)\n self.entity2idx, self.idx2entity, self.rel2idx, self.idx2rel = self.file_loader.load_index_files()\n numerical_eval_path = os.path.join(full_dir, f\"numerical_eval.tsv\")\n\n # ============================== TODO: clean up the training data loading mechanism ============================\n # ========================================== CREATE DATASET FOR TRAINING ================================\n # 1. if test is used, use the df_train_small instead of the full dataset\n # 2. Inference flag should only effect whether the evaluation uses inference\n # =======================================================================================================\n\n # ================================== Load test set for inference =======================================\n if self.inference:\n df_test = pd.read_csv(os.path.join(full_dir, f\"{self.ontology}-test.txt\"), sep=\"\\t\", header=None)\n test_set = TransRInferenceDataset(df_test, full_dataset_dir=self.full_dataset_dir,\n ontology=self.ontology,\n mode=\"test\")\n self.test_dataloader = torch.utils.data.DataLoader(test_set,\n batch_size=test_set.candidate_max * self.gpu_number,\n shuffle=False)\n # ================================== Load training set for general embedding ============================\n if self.test:\n print(\"Using small dataset for testing\")\n df_train = df_train_small\n\n train_set = TransRInferenceDataset(df_train, full_dataset_dir=self.full_dataset_dir,\n ontology=self.ontology,\n mode=\"general_train\", global_neg=self.global_neg)\n self.train_dataloader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n\n # ==================================== Load evaluation set, which is a smaller training set ===============\n train_set_eval = TransRInferenceDataset(df_train_small, full_dataset_dir=self.full_dataset_dir,\n ontology=self.ontology,\n mode=\"general_train_eval\", global_neg=self.global_neg)\n self.train_dataloader_eval = torch.utils.data.DataLoader(train_set_eval,\n batch_size=train_set_eval.ent_num * self.gpu_number,\n shuffle=False)\n # ======================== Load training and evaluation set for singular nodes ==============================\n # check whether singular-train.tsv exist\n value_node_path = os.path.join(full_dir, f\"{self.ontology}-singular-train.txt\")\n if os.path.exists(value_node_path):\n print(\"Singular node training set exists\", value_node_path)\n # df_value_node = pd.read_csv(value_node_path, sep=\"\\t\", header=None)\n df_value_node = self.df_train\n self.value_node_exists = True\n else:\n print(\"Singular node trianing set not exists\", value_node_path)\n df_value_node = self.df_train\n self.value_node_exists = False\n\n if self.value_node_exists:\n value_node_eval_set = TransRInferenceDataset(df=df_train_small, full_dataset_dir=self.full_dataset_dir,\n ontology=self.ontology,\n mode=\"value_node_eval\")\n value_node_set = TransRInferenceDataset(df=df_value_node, full_dataset_dir=self.full_dataset_dir,\n ontology=self.ontology,\n mode=\"value_node\")\n self.dataloader_value_node = torch.utils.data.DataLoader(value_node_set, batch_size=self.batch_size,\n shuffle=True)\n self.dataloader_value_node_eval = 
torch.utils.data.DataLoader(value_node_eval_set,\n batch_size=value_node_eval_set.ent_num,\n shuffle=False)\n # ============================================================================================================\n\n self.use_cuda = torch.cuda.is_available()\n # self.use_cuda = False\n device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n self.device = device\n print(f\"==================== USING {self.device} =====================\")\n # ------------------------- Training hyperparameters -----------------------\n self.epoch_num = epoch_num\n self.model = TransR(rel_dim=self.dim, rel_num=len(self.rel2idx.keys()), ent_dim=self.dim,\n ent_num=len(self.entity2idx.keys()), device=self.device,\n use_projection=self.use_projection, alpha=self.alpha,\n margin=margin, resume_training=self.resume, dataset_path=self.full_dataset_dir)\n\n if self.use_cuda:\n self.model = nn.DataParallel(self.model)\n self.model.to(self.device)\n\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)\n self.scheduler = ExponentialLR(self.optimizer, gamma=self.gamma)\n\n def export_embeddings(self):\n if self.use_cuda:\n self.write_embeddings(self.model.module.ent_embedding, \"ent_embedding\")\n self.write_embeddings(self.model.module.rel_embedding, \"rel_embedding\")\n self.write_embeddings(self.model.module.attr_embedding, \"attr_embedding\")\n self.write_embeddings(self.model.module.bias_embedding, \"bias_embedding\")\n self.write_embeddings(self.model.module.proj_matrix, \"proj_matrix\")\n else:\n self.write_embeddings(self.model.ent_embedding, \"ent_embedding\")\n self.write_embeddings(self.model.rel_embedding, \"rel_embedding\")\n self.write_embeddings(self.model.attr_embedding, \"attr_embedding\")\n self.write_embeddings(self.model.bias_embedding, \"bias_embedding\")\n self.write_embeddings(self.model.proj_matrix, \"proj_matrix\")\n\n def write_embeddings(self, embedding, embedding_name):\n lines = []\n for embedding in embedding.weight.data:\n line = '\\t'.join([str(l) for l in embedding.tolist()])\n lines.append(line)\n content = '\\n'.join(lines)\n with open(os.path.join(DATA_DIR, self.full_dataset_dir, f'{embedding_name}.tsv'), 'w') as f:\n f.write(content)\n f.close()\n\n def inference_evaluation(self):\n\n total_mrr, counter, filtered_counter, total_fmrr = 0, 0, 0, 0\n hit_rate_list = []\n filtered_hit_rate_list = []\n for test_set in tqdm(self.test_dataloader):\n heads, rels, all_tails, true_tail = test_set[0], test_set[1], test_set[2], test_set[3][0].item()\n selected_idx = (all_tails >= 0)\n heads, rels, all_tails = heads[selected_idx], rels[selected_idx], all_tails[selected_idx]\n triples = torch.stack((heads, rels, all_tails)).type(torch.LongTensor)\n if self.use_cuda:\n distances = self.model.module.infer(triples)\n else:\n distances = self.model.infer(triples)\n f_ranking, f_ranking_idx = evaluate_ranking(distances=distances, all_tails=all_tails,\n true_tail=true_tail)\n filtered_counter += 1\n counter += 1\n total_fmrr += f_ranking\n hit_rate_list.append(f_ranking_idx)\n filtered_hit_rate_list.append(f_ranking_idx)\n\n filtered_hit_rate_list = hit_rate(filtered_hit_rate_list)\n total_fmrr = total_fmrr / filtered_counter\n print(\"=================== Inference evaluation result ====================\")\n print(f\"total infer fmrr: {total_fmrr}\")\n print(f\"filtered infer hit rate: {filtered_hit_rate_list}\")\n print(\"====================================================================\")\n\n def evaluate(self):\n with no_grad():\n self.model.eval()\n 
total_mrr = 0\n counter = 0\n filtered_hit_rate_list = []\n for test_set in tqdm(self.train_dataloader_eval):\n heads, rels, all_tails, true_tail = test_set[0], test_set[1], test_set[2], test_set[3][0].item()\n selected_idx = (all_tails >= 0)\n heads, rels, all_tails = heads[selected_idx], rels[selected_idx], all_tails[selected_idx]\n triples = torch.stack((heads, rels, all_tails)).type(torch.LongTensor)\n if self.use_cuda:\n distances = self.model.module.predict(triples=triples)\n else:\n distances = self.model.predict(triples=triples)\n f_ranking, f_ranking_idx = evaluate_ranking(distances=distances, all_tails=all_tails,\n true_tail=true_tail)\n filtered_hit_rate_list.append(f_ranking_idx)\n counter += 1\n total_mrr += f_ranking\n\n total_mrr = total_mrr / counter\n filtered_hit_rate_list = hit_rate(filtered_hit_rate_list)\n print(\"=================== Training set evaluation ===================\")\n print(f\"total train mrr: {total_mrr}\")\n print(f\"the training hit rate list is : {filtered_hit_rate_list}\")\n print(\"===============================================================\")\n\n if self.inference:\n self.inference_evaluation()\n\n def train(self):\n \"\"\"\n Split the the training set into non-numerical and numerical subsets\n marked by [3] == -999 or not\n :return:\n \"\"\"\n self.model.train()\n total_train_loss = 0\n total_numerical_loss = 0\n total_non_numerical_loss = 0\n # if self.test:\n # self.train_dataloader = self.train_dataloader_small\n # # in test mode, use self.train_dataloader_small\n print(\"Starting the training\")\n for pos, neg in tqdm(self.train_dataloader):\n self.optimizer.zero_grad()\n numerical_idx_list = (pos[3] != -999)\n pos = torch.transpose(torch.stack(pos), 0, 1)\n # pos_numerical = torch.transpose(pos[numerical_idx_list], 0, 1).to(self.device)\n pos_numerical = pos[numerical_idx_list].to(self.device)\n # print(pos_numerical)\n # pos_non_numerical = torch.transpose(pos[~numerical_idx_list], 0, 1).to(self.device)\n # create negative index list with ~\n pos_non_numerical = pos[~numerical_idx_list].to(self.device)\n neg = torch.transpose(torch.stack(neg), 0, 1)\n # neg_numerical = torch.transpose(neg[numerical_idx_list], 0, 1).to(self.device)\n neg_numerical = neg[numerical_idx_list].to(self.device)\n # neg_non_numerical = torch.transpose(neg[~numerical_idx_list], 0, 1).to(self.device)\n neg_non_numerical = neg[~numerical_idx_list].to(self.device)\n\n loss_non_numerical = self.model(pos_non_numerical, neg_non_numerical, mode=\"non_numerical\")\n loss_non_numerical.mean().backward()\n total_non_numerical_loss += loss_non_numerical.cpu().mean()\n self.optimizer.step()\n\n if len(pos_numerical) > 0:\n if len(pos_numerical[0]) > 0:\n if len(pos_numerical) == 1:\n pos_numerical = pos_numerical.repeat(self.gpu_number, 1)\n neg_numerical = neg_numerical.repeat(self.gpu_number, 1)\n loss_numerical = self.model(pos_numerical, neg_numerical, mode=\"numerical\") * 0.0001\n loss_numerical.mean().backward()\n total_numerical_loss += loss_numerical.cpu().mean()\n self.optimizer.step()\n\n if self.use_cuda:\n self.model.module.normalize_parameters()\n else:\n self.model.normalize_parameters()\n self.optimizer.step()\n\n print(f\"Loss: {total_train_loss}\")\n print(f\"Numerical Loss: {total_numerical_loss}\")\n print(f\"Non Numerical Loss: {total_non_numerical_loss}\")\n\n def run(self):\n for epoch in range(self.epoch_num + 1):\n print(f\"Epoch: {epoch}\")\n self.train()\n if epoch % 5 == 0:\n self.scheduler.step()\n self.evaluate()\n if self.value_node_exists:\n 
self.calculate_value_node_embedding()\n self.evaluate_value_node_embedding()\n self.export_embeddings()\n print(f\"Current learning rate: {self.scheduler.get_lr()}\")\n\n # if self.inference:\n if self.value_node_exists:\n self.calculate_value_node_embedding()\n self.evaluate_value_node_embedding()\n self.export_embeddings()\n\n def calculate_value_node_embedding(self):\n with no_grad():\n for triple in tqdm(self.dataloader_value_node):\n if self.use_cuda:\n self.model.module.calculate_tail_embedding(triple)\n else:\n self.model.calculate_tail_embedding(triple)\n\n def evaluate_value_node_embedding(self):\n with no_grad():\n filtered_hit_rate_list = []\n counter = 0\n total_mrr = 0\n for triple in tqdm(self.dataloader_value_node_eval):\n true_tail = triple[3][0].item()\n all_tails = triple[2]\n if self.use_cuda:\n distances = self.model.module.distance(triple)\n else:\n distances = self.model.distance(triple)\n f_ranking, f_ranking_idx = evaluate_ranking(distances=distances, all_tails=all_tails,\n true_tail=true_tail)\n filtered_hit_rate_list.append(f_ranking_idx)\n counter += 1\n total_mrr += f_ranking\n total_mrr = total_mrr / counter\n filtered_hit_rate_list = hit_rate(filtered_hit_rate_list)\n print(\"=================== Node value set evaluation ===================\")\n print(f\"total value node mrr: {total_mrr}\")\n print(f\"the value node hit rate list is : {filtered_hit_rate_list}\")\n print(\"===============================================================\")\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dimension\", help=\"dimension of embedding\")\n parser.add_argument(\"-lr\", \"--learning_rate\", help=\"starting learning rate\")\n parser.add_argument(\"-g\", \"--gamma\", help=\"gamma for scheduler\")\n parser.add_argument(\"-o\", \"--ontology\", help=\"main ontology used\")\n parser.add_argument(\"-so\", \"--sub_ontology\", help=\"name of the sub ontology\")\n parser.add_argument(\"-bs\", \"--batch_size\", help=\"size of mini batch\")\n parser.add_argument(\"-test\", \"--test_mode\", help=\"if true, the training will use a smaller training set\")\n parser.add_argument(\"-proj\", \"--use_projection\", help=\"if true, use projection in numerical linear regression\")\n parser.add_argument(\"-alpha\", \"--alpha\", help=\"ratio between l_a and l_r\")\n parser.add_argument(\"-margin\", \"--margin\", help=\"margin for MarginRankLoss\")\n parser.add_argument(\"-epoch\", \"--epoch\", help=\"number of epochs\")\n parser.add_argument(\"-resume\", \"--resume\", help=\"resume the training by loading embeddings \")\n parser.add_argument(\"-global_neg\", \"--global_neg\", help=\"whether use all entities as negative samples\")\n parser.add_argument(\"-inference\", \"--inference\", help=\"whether try to do inference with the ontology\")\n parser.add_argument(\"-gpu_num\", \"--gpu_number\", help=\"number of gpus used\")\n args = parser.parse_args()\n\n gpu_number = 1\n if args.gpu_number:\n gpu_number = int(args.gpu_number)\n\n dim = 20\n if args.dimension:\n dim = int(args.dimension)\n\n learning_rate = 0.01\n if args.learning_rate:\n learning_rate = float(args.learning_rate)\n\n alpha = 0.1\n if args.alpha:\n alpha = float(args.alpha)\n\n margin = 5\n if args.margin:\n margin = float(args.margin)\n\n gamma = 1\n if args.gamma:\n gamma = float(args.gamma)\n\n batch_size = 256\n if args.batch_size:\n batch_size = int(args.batch_size)\n\n epoch = 100\n if args.epoch:\n epoch = int(args.epoch)\n\n ontology = 
\"ontospecies_new\"\n if args.ontology:\n ontology = args.ontology\n\n sub_ontology = None\n if args.sub_ontology:\n sub_ontology = args.sub_ontology\n\n test = False\n if args.test_mode:\n if args.test_mode.lower() == \"yes\":\n test = True\n elif args.test_mode.lower() == \"no\":\n test = False\n else:\n test = False\n\n use_projection = False\n if args.use_projection:\n if args.use_projection.lower() == \"yes\":\n use_projection = True\n elif args.use_projection.lower() == \"no\":\n use_projection = False\n else:\n use_projection = False\n\n resume = False\n if args.resume:\n if args.resume.lower() == \"yes\":\n resume = True\n elif args.resume.lower() == \"no\":\n resume = False\n else:\n resume = False\n\n global_neg = False\n if args.global_neg:\n if args.global_neg.lower() == \"yes\":\n global_neg = True\n elif args.global_neg.lower() == \"no\":\n global_neg = False\n else:\n global_neg = False\n\n inference = False\n if args.inference:\n if args.inference.lower() == \"yes\":\n inference = True\n elif args.inference.lower() == \"no\":\n inference = False\n else:\n inference = False\n\n print(f\"Dimension: {dim}\")\n print(f\"Learning rate: {learning_rate}\")\n print(f\"Gamma: {gamma}\")\n print(f\"Test: {test}\")\n print(f\"Batch size: {batch_size}\")\n print(f\"Alpha: {alpha}\")\n print(f\"Use projection: {use_projection}\")\n print(f\"Test: {test}\")\n print(f\"Epoch: {epoch}\")\n print(f\"Resume training: {resume}\")\n print(f\"Number of GPUs: {gpu_number}\")\n\n batch_size = batch_size * gpu_number\n\n if sub_ontology:\n full_dir = os.path.join(DATA_DIR, 'CrossGraph', f'{ontology}/{sub_ontology}')\n ontology = sub_ontology\n else:\n full_dir = os.path.join(DATA_DIR, 'CrossGraph', f'{ontology}')\n\n my_trainer = TransRATrainer(full_dataset_dir=full_dir, ontology=ontology, batch_size=32, dim=dim,\n learning_rate=learning_rate, test=test, use_projection=use_projection, alpha=alpha,\n margin=margin, epoch_num=epoch, gamma=gamma, resume=resume, inference=inference,\n global_neg=global_neg, gpu_number=gpu_number)\n\n my_trainer.run()\n", "sub_path": "MARIE_AND_BERT/Training/Trainers/TransRATrainer.py", "file_name": "TransRATrainer.py", "file_ext": "py", "file_size_in_byte": 22535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.topk", "line_number": 45, "usage_type": "call"}, {"api_name": "Marie.Util.NHopExtractor.HopExtractor", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Marie.Util.CommonTools.FileLoader.FileLoader", "line_number": 89, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "Marie.Util.Dataset.TransR_Inference_Dataset.TransRInferenceDataset", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 105, "usage_type": "attribute"}, {"api_name": "Marie.Util.Dataset.TransR_Inference_Dataset.TransRInferenceDataset", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 116, "usage_type": "attribute"}, {"api_name": "Marie.Util.Dataset.TransR_Inference_Dataset.TransRInferenceDataset", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "Marie.Util.Dataset.TransR_Inference_Dataset.TransRInferenceDataset", "line_number": 139, "usage_type": "call"}, {"api_name": "Marie.Util.Dataset.TransR_Inference_Dataset.TransRInferenceDataset", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 147, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 154, "usage_type": "call"}, {"api_name": "Marie.Util.Models.TransR.TransR", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 165, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 168, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ExponentialLR", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "Marie.Util.location.DATA_DIR", "line_number": 191, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 225, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 234, "usage_type": "attribute"}, 
{"api_name": "tqdm.tqdm", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 330, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 331, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 338, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 342, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 365, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 483, "usage_type": "call"}, {"api_name": "Marie.Util.location.DATA_DIR", "line_number": 483, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 483, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 486, "usage_type": "call"}, {"api_name": "Marie.Util.location.DATA_DIR", "line_number": 486, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 486, "usage_type": "attribute"}]} +{"seq_id": "471905292", "text": "import os\nimport time\nfrom multiprocessing import Pool\nfrom timer import DecoratorTimer\n\ndef task(name):\n begin = time.time()\n print('Task %s (%s) is starting ...' % (name, os.getpid()))\n time.sleep(2)\n end = time.time()\n print('Task %s (%s) is done ...' % (name, os.getpid()))\n\n#if __name__ == '__main__':\n@DecoratorTimer\ndef main():\n # begin = time.time()\n print('Main process (%s) start' % os.getpid())\n # You can specify the number of process running during the same time\n # by set option processes=number\n pool = Pool(9)\n for i in range(9):\n pool.apply_async(task, args=(i,))\n pool.close()\n pool.join()\n # end = time.time()\n # print('All job done in %.3f seconds' % (end - begin))\n\nmain()\n", "sub_path": "misc/multiprocessing4.py", "file_name": "multiprocessing4.py", "file_ext": "py", "file_size_in_byte": 746, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 17, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 20, "usage_type": "call"}, {"api_name": "timer.DecoratorTimer", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "571366144", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\ndef input_gen_fn(train_path: str, valid_path: str = None):\n if valid_path:\n paths = [train_path, valid_path]\n else:\n paths = [train_path]\n\n gen_list = np.zeros(2, dtype='object')\n for idx, path in enumerate(paths):\n gen_list[idx] = ImageDataGenerator(rescale=1 / 255.,\n horizontal_flip=True,\n shear_range=.2,\n rotation_range=.45,\n width_shift_range=.2,\n height_shift_range=.2,\n zoom_range=.2,\n fill_mode='nearest')\n gen_list[idx] = gen_list[idx].flow_from_directory(path,\n target_size=(300, 300),\n batch_size=128,\n class_mode='binary')\n return gen_list\n\n\ndef gen_convnet(shape: (list, 
tuple) = (300, 300, 3)):\n # model architecture\n mdl_input = keras.Input(shape)\n mdl_layer = keras.layers.Conv2D(16, (3, 3), activation='relu')(mdl_input)\n mdl_layer = keras.layers.MaxPool2D(2, 2)(mdl_layer)\n mdl_layer = keras.layers.Conv2D(32, (3, 3), activation='relu')(mdl_layer)\n mdl_layer = keras.layers.MaxPool2D(2, 2)(mdl_layer)\n mdl_layer = keras.layers.Conv2D(64, (3, 3), activation='relu')(mdl_layer)\n mdl_layer = keras.layers.MaxPool2D(2, 2)(mdl_layer)\n mdl_layer = keras.layers.Flatten()(mdl_layer)\n mdl_layer = keras.layers.Dense(512, activation='relu')(mdl_layer)\n mdl_layer = keras.layers.Dense(64, activation='relu')(mdl_layer)\n # max(self.train.class_indices.values()) - it starts from 0\n mdl_logit = keras.layers.Dense(1, activation='sigmoid')(mdl_layer)\n\n # model build and compile\n model = keras.Model(mdl_input, mdl_logit)\n model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=.001),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n return model\n\n\ndef acc_loss_plot(mdl_history):\n \"\"\"\n Plots model accuracy, validation accuracy, loss and validation loss in respect with epochs\n\n :param mdl_history: trained model\n \"\"\"\n acc = mdl_history.history['accuracy']\n loss = mdl_history.history['loss']\n try:\n val_acc = mdl_history.history['val_accuracy']\n val_loss = mdl_history.history['val_loss']\n validation = True\n except KeyError:\n validation = False\n\n epochs = range(len(acc))\n # plot accuracy figure\n plt.plot(epochs, acc, 'r', label='Training accuracy')\n if validation:\n plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n else:\n plt.title('Training accuracy')\n plt.legend()\n plt.figure()\n # plot loss figure\n plt.plot(epochs, loss, 'r', label='Training Loss')\n if validation:\n plt.plot(epochs, val_loss, 'b', label='Validation Loss')\n plt.title('Training and validation loss')\n else:\n plt.title('Training loss')\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n # load data from paths\n t_path = '../Introduction/tmp/horse-or-human/'\n v_path = '../Introduction/tmp/validation-horse-or-human/'\n gen_train, gen_valid = input_gen_fn(t_path, v_path)\n\n # Convnet\n model = gen_convnet()\n history = model.fit(gen_train,\n steps_per_epoch=8,\n epochs=1,\n # validation_data=gen_valid,\n verbose=2)\n # visualise accuracy and loss\n acc_loss_plot(history)\n", "sub_path": "Convnets/week2_1.py", "file_name": "week2_1.py", "file_ext": "py", "file_size_in_byte": 3916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.keras.Input", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 33, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPool2D", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 34, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 35, 
"usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 35, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPool2D", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 36, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPool2D", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 38, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 39, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "252284013", "text": "#!/usr/bin/env python2\n# coding: utf-8\n\n# from seg import seg\nfrom sklearn.externals import joblib\nfrom classifier import onehot\nfrom embed import sent2vec\nimport numpy as np\n\n\nreviews = {}\n\n# always return 0 regardless of review_id and aspect\ndef all_zero(review_id, aspect):\n return 0\n\n\ndef use_classifier(review_id, aspect):\n clf = joblib.load('RandomForestClassifier.pkl')\n review = reviews[review_id]\n vec = sent2vec(review)\n X = np.append(vec, onehot(aspect))\n X = X.reshape(1, -1)\n pred = clf.predict(X)\n # print(pred)\n return int(pred[0])\n\ndef read_test_review():\n with open('../data/test_review_seg.txt') as test_review_file:\n lines = test_review_file.readlines()\n for i in range(len(lines) // 2):\n review_id, review = int(lines[2*i].strip()), lines[2*i+1].strip()\n reviews[review_id] = review\n \n return reviews\n\n\ndef make_prediction(get_label):\n import pandas as pd\n \n reviews = read_test_review()\n df_test = pd.read_csv('../data/test.csv')\n df_label = df_test[['Review_id', 'Aspect']].apply(lambda x: get_label(*x), axis=1).to_frame()\n df_concat = pd.concat([df_test[['Id']], df_label], axis=1)\n df_concat.columns = ('Id', 'Label')\n df_concat.to_csv('submission.csv', index=False)\n \n return df_concat\n\n\nmake_prediction(use_classifier)\n\n", "sub_path": "project1/src/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sklearn.externals.joblib.load", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 19, "usage_type": "name"}, {"api_name": "embed.sent2vec", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 22, "usage_type": "call"}, {"api_name": "classifier.onehot", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "377660742", "text": "\nimport argparse\nimport socket\nfrom datetime import datetime\nimport sys\nimport os\nimport pickle\nfrom collections import deque\nfrom uuid import uuid4\n\n\nclass Task:\n\tdef __init__(self, length, data):\n\t\tself.length = length\n\t\tself.data = data\n\t\tself.id = uuid4().hex\n\t\tself.time = 0\n\n\tdef is_in_work(self, current_time, timeout):\n\t\treturn current_time - self.time < timeout\n\n\tdef set_time(self):\n\t\tself.time = int(datetime.now().timestamp())\n\n\n\n\nclass TaskQueueServer:\n\tfilename = 'log'\n\n\n\tdef __init__(self, ip, port, path, timeout):\n\t\tself.ip = ip\n\t\tself.port = port\n\t\tself.path = path\n\t\tself.timeout = timeout\n\t\tself.sock = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\n\t\tself.buffer = 4096\n\t\tself.filepath = os.path.join(self.path, self.filename)\n\t\tself.commands = {\n\t\t\t'ADD':\tself.add_command,\n\t\t\t'GET':\tself.get_command,\n\t\t\t'ACK':\tself.ack_command,\n\t\t\t'IN':\tself.in_command,\n\t\t\t'SAVE':\tself.save_command,\n\t\t}\n\t\tself.current_time = 0\n\t\tself.LIST_OF_QUEUES = self.load()\n\n\n\tdef load(self):\n\t\ttry:\n\t\t\twith open(self.filename, 'rb') as f:\n\t\t\t\tdata = pickle.load(f)\n\t\t\treturn data\n\t\texcept:\n\t\t\t# print(f'could not open {self.filepath}')\n\t\t\treturn {}\n\n\n\tdef terminate(self, conn):\n\t\tconn.shutdown(1)\n\t\tconn.close()\n\n\n\tdef recvall(self, conn):\n\t\tdata = b''\n\t\twhile True:\n\t\t\tbuf = conn.recv(self.buffer)\n\t\t\tdata += buf\n\t\t\tif len(buf) < self.buffer:\n\t\t\t\tbreak\n\t\treturn data\n\n\n# commands\n\tdef save_command(self):\n\t\twith open(self.filename, 'wb') as f:\n\t\t\tpickle.dump(self.LIST_OF_QUEUES, f)\n\t\treturn 'OK'\n\n\n\tdef add_command(self, queue_name, length, data):\n\t\tif int(length) > 10**6 or int(length) != len(data):\n\t\t\treturn 'ERROR'\n\t\tqueue = self.LIST_OF_QUEUES.get(queue_name)\n\t\tif not queue:\n\t\t\tself.LIST_OF_QUEUES[queue_name] = deque()\n\t\t\tqueue = self.LIST_OF_QUEUES.get(queue_name)\n\t\ttask = Task(length, data)\n\t\tqueue.append(task)\n\t\treturn task.id\n\t\t# return f'task id {task.id}\\n'\n\n\n\tdef get_command(self, queue_name):\n\t\tqueue = self.LIST_OF_QUEUES.get(queue_name)\n\t\tif queue:\n\t\t\tfor task in queue:\n\t\t\t\tif not task.is_in_work(self.current_time, self.timeout):\n\t\t\t\t\ttask.set_time()\n\t\t\t\t\treturn f'{task.id} {task.length} {task.data}'\n\t\t\t\t\t#return f'task id: {task.id}\\nlength: {task.length}\\ndata: {task.data}\\n'\n\t\treturn 'NONE'\n\n\n\tdef in_command(self, queue_name, task_id):\n\t\tqueue = self.LIST_OF_QUEUES.get(queue_name)\n\t\tif queue:\n\t\t\tfor task in queue:\n\t\t\t\tif task.id == task_id:\n\t\t\t\t\treturn 'YES'\n\t\treturn 'NO'\n\n\n\tdef ack_command(self, queue_name, task_id):\n\t\tqueue = self.LIST_OF_QUEUES.get(queue_name)\n\t\tif queue:\n\t\t\tfor task in queue:\n\t\t\t\tif task.id == task_id and task.is_in_work(self.current_time, self.timeout):\n\t\t\t\t\tqueue.remove(task)\n\t\t\t\t\tdel task\n\t\t\t\t\treturn 'YES'\n\t\treturn 'NO'\n\n\tdef run(self):\n\t\ttry:\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\t\tself.sock.bind((self.ip, self.port))\n\t\t\tself.sock.listen()\n\t\t\t# print(f'server is running on {self.ip} on port {self.port}')\n\t\t\tself.main()\n\n\t\texcept KeyboardInterrupt:\n\t\t\t# print('\\nclosing server..')\n\t\t\t# self.save_command() # save b4 exit\n\t\t\tself.sock.close()\n\t\t\tsys.exit(0)\n\n\n\tdef main(self):\n\t\twhile True:\n\t\t\tconn, addr = self.sock.accept()\n\t\t\tdata = self.recvall(conn).decode().rstrip().split()\n\t\t\tanswer = 'ERROR'\n\n\t\t\tif data:\n\t\t\t\tcommand = data[0]\n\t\t\t\tfunction = self.commands.get(command)\n\t\t\t\tself.current_time = int(datetime.now().timestamp())\n\t\t\t\tif function:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tanswer = function(*data[1:])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(e)\n\t\t\t\t\t\tpass\n\n\t\t\t\tconn.sendall(answer.encode())\n\t\t\tself.terminate(conn)\n\n\ndef parse_args():\n\tparser = argparse.ArgumentParser(description='This is a simple task queue server with custom protocol')\n\tparser.add_argument(\n\t\t'-p',\n\t\taction=\"store\",\n\t\tdest=\"port\",\n\t\ttype=int,\n\t\tdefault=5555,\n\t\thelp='Server 
port')\n\tparser.add_argument(\n\t\t'-i',\n\t\taction=\"store\",\n\t\tdest=\"ip\",\n\t\ttype=str,\n\t\tdefault='0.0.0.0',\n\t\thelp='Server ip adress')\n\tparser.add_argument(\n\t\t'-c',\n\t\taction=\"store\",\n\t\tdest=\"path\",\n\t\ttype=str,\n\t\tdefault='./',\n\t\thelp='Server checkpoints dir')\n\tparser.add_argument(\n\t\t'-t',\n\t\taction=\"store\",\n\t\tdest=\"timeout\",\n\t\ttype=int,\n\t\tdefault=300,\n\t\thelp='Task maximum GET timeout in seconds')\n\treturn parser.parse_args()\n\nif __name__ == '__main__':\n\targs = parse_args()\n\tserver = TaskQueueServer(**args.__dict__)\n\tserver.run()", "sub_path": "task_queue/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 4266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "uuid.uuid4", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 37, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 37, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 54, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 88, "usage_type": "call"}, {"api_name": "socket.SOL_SOCKET", "line_number": 128, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 128, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "258541347", "text": "'''\r\nCreated on 2017年2月6日\r\n@function:\r\n 逻辑回归算法\r\n 主要是梯度上升和随机梯度算法\r\n 梯度上升算法是对整个训练集合做循环矩阵运算更新预测值\r\n 随机梯度算法是循环训练集使每一条数据更新一次预测值,要达到理想效果也必须进行多次循环\r\n@author: zz_hdd\r\n'''\r\nfrom numpy import *\r\nfrom matplotlib.pyplot import scatter\r\nfrom builtins import range\r\n\r\ndef loadDataSet():\r\n dataMat = []\r\n labelMat = []\r\n fr = open('testSet.txt')\r\n for line in fr.readlines():\r\n words = line.strip().split()\r\n dataMat.append([1.0, float(words[0]), float(words[1])])\r\n labelMat.append(int(words[2]))\r\n return dataMat,labelMat\r\n\r\n#越阶函数\r\ndef sigmoid(inX):\r\n return 1.0/(1+exp(-inX))\r\n#梯度上升求最优值\r\n#迭代公式 求解真实值和预测值��误差 求乘积更新\r\ndef gradAscent(dataMat, label):\r\n dataMatrix = mat(dataMat)\r\n labelMat = mat(label).transpose()\r\n m,n = shape(dataMatrix)\r\n alpha = 0.001\r\n maxCycles = 500\r\n weights = ones((n,1))\r\n for k in range(maxCycles):\r\n h = sigmoid(dataMatrix*weights)\r\n error = labelMat - h\r\n weights = weights + alpha*dataMatrix.transpose()*error\r\n return weights\r\n\r\n#随机梯度求最优值\r\n#迭代时,由每条数据更新预测值\r\ndef stocGradAscent0(dataMat, label):\r\n m,n = shape(dataMat)\r\n alpha = 0.01\r\n weights = ones(n)\r\n for i in range(m):\r\n h = sigmoid(sum(dataMat[i]*weights))\r\n error = label[i] - h\r\n weights = weights + alpha*error*array(dataMat[i])\r\n return mat(weights).transpose()\r\n\r\n#随机梯度求最优值(优化算法 减少迭代次数和准确度)\r\n#迭代时,由每条数据更新预测值\r\ndef 
stocGradAscent1(dataMat, label, numIter = 150):\r\n m,n = shape(dataMat)\r\n alpha = 0.01\r\n weights = ones(n)\r\n for j in range(numIter):\r\n dataIndex = list(range(m))\r\n for i in range(m):\r\n aplha = 4/(1.0+j+i) + 0.01\r\n rIndex = int(random.uniform(0,len(dataIndex)))\r\n h = sigmoid(sum(dataMat[rIndex]*weights))\r\n error = label[rIndex] - h\r\n weights = weights + alpha*error*array(dataMat[rIndex])\r\n del(dataIndex[rIndex])\r\n return mat(weights).transpose()\r\n\r\ndef plotBestFit(weights):\r\n import matplotlib.pyplot as plt\r\n dataMat,labelMat = loadDataSet()\r\n dataArr = array(dataMat)\r\n n = shape(dataArr)[0]\r\n xcord1=[]; ycord1=[]\r\n xcord2=[]; ycord2=[]\r\n for i in range(n):\r\n if labelMat[i] == 1:\r\n xcord1.append(dataArr[i,1])\r\n ycord1.append(dataArr[i,2])\r\n else:\r\n xcord2.append(dataArr[i,1])\r\n ycord2.append(dataArr[i,2])\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')\r\n ax.scatter(xcord2, ycord2, s=30, c='green')\r\n #此处书中代码有误 不知是否是2.7和3.4版本区别或者是numpy版本区别\r\n #x,y必须为numpy的array类型\r\n #y = (-weights[0] - weights[1]*x)/weights[2]\r\n #上式得到的y为matrix类型,修改为下式\r\n x = arange(-3.0, 3.0, 0.1)\r\n y = (-weights[0,0] - weights[1,0]*x)/weights[2,0]\r\n ax.plot(x, y)\r\n plt.xlabel(\"X1\")\r\n plt.ylabel(\"X2\")\r\n plt.show()\r\n\r\nif __name__ == '__main__':\r\n dataMat,labelMat = loadDataSet()\r\n print (labelMat[:6])\r\n print (dataMat[0:3])\r\n weights = gradAscent(dataMat, labelMat)\r\n# plotBestFit(weights)\r\n plotBestFit(stocGradAscent0(dataMat, labelMat))", "sub_path": "machineLearn/logistic/logistic.py", "file_name": "logistic.py", "file_ext": "py", "file_size_in_byte": 3578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "builtins.range", "line_number": 36, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 48, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 60, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 61, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 62, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "223422790", "text": "import logging\nfrom contextlib import contextmanager\nfrom datetime import datetime\n\n\nclass Logger:\n \"\"\"Logging Uitlity Class for monitoring and debugging\n \"\"\"\n\n def __init__(self,\n name,\n log_fname,\n log_level=logging.INFO,\n custom_log_handler=None):\n\n self.name = name\n self.logger = logging.getLogger(name)\n self.logger.setLevel(log_level)\n ch = logging.FileHandler(log_fname)\n self.logger.addHandler(ch)\n self.logger.addHandler(logging.StreamHandler())\n \n if custom_log_handler:\n if isinstance(custom_log_handler, list):\n for handler in custom_log_handler:\n self.logger.addHandler(handler)\n 
else:\n self.logger.addHandler(handler)\n\n def kiritori(self):\n self.logger.info('-'*80)\n\n def double_kiritori(self):\n self.logger.info('='*80)\n \n def space(self):\n self.logger.info('\\n')\n\n @contextmanager\n def interval_timer(self, name):\n start_time = datetime.now()\n self.logger.info(\"\\n\")\n self.logger.info(f\"Execution {name} start at {start_time}\")\n try:\n yield\n finally:\n end_time = datetime.now()\n td = end_time - start_time\n self.logger.info(f\"Execution {name} end at {end_time}\")\n self.logger.info(f\"Execution Time : {td}\")\n self.logger.info(\"\\n\")\n\n def __getattr__(self, attr):\n \"\"\"\n for calling logging class attribute\n if you call attributes of other class, raise AttributeError\n \"\"\"\n self.logger.info(f\"{datetime.now()}\")\n return getattr(self.logger, attr)\n", "sub_path": "src/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "155165011", "text": "import csv\nimport time\nimport requests\nimport json\n\ndatos = []\n\nheaders = {'content-type': 'application/json'}\n\nprint(\"introduce el access_token\")\naccess_token = input()\n\n\nprint(\"introduce el numero de inicio de publicacion dentro de parsehub\")\nstart_number = input()\n\nwith open('data.csv', newline='') as File:\n reader = csv.reader(File)\n for row in reader:\n\n numero_publicacion = row[0]\n cantidad_actualizada = row[1]\n precio_actualizado = row[2]\n\n data_product = {\n \"price\": precio_actualizado,\n \"available_quantity\": cantidad_actualizada\n }\n\n r = requests.put(f'https://api.mercadolibre.com/items/MLM{numero_publicacion}?access_token={access_token}', data = json.dumps(data_product),headers=headers)\n\n x = [numero_publicacion, cantidad_actualizada,precio_actualizado]\n\n datos.insert(0, x)\n\n with open('data_out.csv', mode='w') as archivo:\n archivo = csv.writer(archivo, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n for i in range(len(datos)):\n archivo.writerow(datos[i])\n\n time.sleep(1)\n", "sub_path": "25_actualizar_asinbook_con_archivo/actualizar.py", "file_name": "actualizar.py", "file_ext": "py", "file_size_in_byte": 1189, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "csv.reader", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 37, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", 
"line_number": 37, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "107516791", "text": "#vim: set fileencoding=utf-8\n\nfrom django.core.management.base import BaseCommand\nfrom ctlweb.models import *\nfrom django.contrib.auth.models import Group, Permission, User\nimport datetime\n\nclass Command(BaseCommand):\n option_list = BaseCommand.option_list + ()\n\n help = u\"\"\"Mit diesem Kommando kann eine Demo-Instanz erzeugt werden\"\"\"\n\n def handle(self, *args, **options):\n self.import_cluster()\n self.import_interfaces()\n self.import_user()\n self.import_user_rights()\n self.import_components()\n self.import_connections()\n\n def import_cluster(self):\n self.cl1 = Cluster( hostname=\"https://www.c1.de\", \\\n username=\"foobaa\",\\\n port=22)\n self.cl2 = Cluster( hostname=\"https://www.c2.de\", \\\n username=\"foobaa\",\\\n port=22)\n self.cl3 = Cluster( hostname=\"https://www.c3.de\", \\\n username=\"foobaa\", \\\n port=22)\n\n self.cl1.save()\n self.cl2.save()\n self.cl3.save()\n\n def import_interfaces(self):\n self.i1 = Interfaces(name=\"Matrizen\", ci_hash=\"01\",\n ci=\"Quellcode I1\")\n self.i2 = Interfaces(name=\"Grundlagen\", ci_hash=\"02\",\n ci=\"Quellcode I2\")\n self.i3 = Interfaces(name=\"Addition\", ci_hash=\"03\",\n ci=\"aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaa\" \\\n +\"\\n alle meine entchen schwimmen auf dem See\")\n \n self.i1.save()\n self.i2.save()\n self.i3.save()\n\n def import_user(self):\n today = datetime.datetime.today()\n self.user1 = User(username=\"user1\", \n first_name=\"foo\", \n last_name=\"bah\",\n email=\"foo@bah.de\", \n is_staff=False, \n is_active=True,\n is_superuser=False, \n last_login=today,\n date_joined=today)\n self.user2 = User(username=\"user2\",\n first_name=\"bah\",\n last_name=\"foo\",\n email=\"bah@foo.de\",\n is_staff=False,\n is_active=True,\n is_superuser=False,\n last_login=today,\n date_joined=today)\n self.user3 = User(username=\"inactive\",\n first_name=\"abc\",\n last_name=\"xyz\",\n email=\"abc@xyz.de\",\n is_staff=False,\n is_active=False,\n is_superuser=False,\n last_login=today,\n date_joined=today)\n self.user4 = User(username=\"superuser\",\n first_name=\"a\",\n last_name=\"b\",\n email=\"abc@def.de\",\n is_staff=True,\n is_active=True,\n is_superuser=True,\n last_login=today,\n date_joined=today)\n\n self.user1.set_password('teamprojekt')\n self.user2.set_password('teamprojekt')\n self.user3.set_password('teamprojekt')\n self.user4.set_password('teamprojekt')\n\n self.user1.save()\n self.user2.save()\n self.user3.save()\n self.user4.save()\n\n def import_user_rights(self):\n pass\n# permissions = Permission.objects\n# permissions.get(codename=\"can_see_description\").user_set.add(self.user1)\n# permissions.get(codename=\"can_see_description\").user_set.add(self.user2)\n# permissions.get(codename=\"can_see_homecluster\").user_set.add(self.user1)\n# permissions.get(codename=\"can_see_key\").user_set.add(self.user1)\n# permissions.get(codename=\"can_see_path\").user_set.add(self.user1)\n# permissions.get(codename=\"can_see_code\").user_set.add(self.user1)\n# permissions.get(codename=\"can_see_code\").user_set.add(self.user2)\n# permissions.get(codename=\"can_set_active\").user_set.add(self.user1)\n# permissions.get(codename=\"add_group\").user_set.add(self.user1)\n# permissions.get(codename=\"change_user\").user_set.add(self.user1)\n# permissions.get(codename=\"change_group\").user_set.add(self.user1)\n# \n def 
import_components(self):\n self.co1 = Components(exe_hash=\"foo\",\n description=\"Diese Komponente berechnet die LU-Zerlegung \"+\\\n \"mittels Gauß-Algorithmus\",\n version=\"1.001.0001\")\n self.co2 = Components(exe_hash=\"bar\",\n description=\"DieseDiese Komponente addiert zwei MatrizenDiese Komponente addiert zwei MatrizenDiese Komponente addiert zwei Matrizen Komponente addiert zwei Matrizen\",\n version=\"10001a\")\n self.co3 = Components(exe_hash=\"foobar\",\n description=\"Diese Komponente addiert zwei Zahlen miteinander.\",\n version=\"10000000001\")\n self.co4 = Components(exe_hash=\"barfoo\",\n description=\"Diese Komponente addiert zwei komplexe Zahlen\",\n version=\"10001.a\")\n\n self.co1.save()\n self.co2.save()\n self.co3.save()\n self.co4.save()\n\n self.co1.set_active(self.user4)\n self.co2.set_active(self.user4)\n self.co3.set_active(self.user4)\n self.co4.set_active(self.user4)\n\n def import_connections(self):\n self.ic1 = Interfaces_Components(interface=self.i1,\n component=self.co1)\n self.ic2 = Interfaces_Components(interface=self.i1,\n component=self.co2) \n self.ic3 = Interfaces_Components(interface=self.i2,\n component=self.co2) \n self.ic4 = Interfaces_Components(interface=self.i2,\n component=self.co3) \n self.ic5 = Interfaces_Components(interface=self.i3,\n component=self.co4)\n \n self.ic1.save()\n self.ic2.save()\n self.ic3.save()\n self.ic4.save()\n self.ic5.save()\n\n self.hc1 = Components_Cluster(cluster=self.cl1,\n component=self.co1,\n name=\"LU-Zerlegung\")\n self.hc2 = Components_Cluster(cluster=self.cl2,\n component=self.co1,\n name=\"Gauß-LU-Zerlegung\")\n self.hc3 = Components_Cluster(cluster=self.cl3,\n component=self.co1,\n name=\"LU-Zerlegung\")\n self.hc4 = Components_Cluster(cluster=self.cl3,\n component=self.co2,\n name=\"Matrixaddition\")\n self.hc5 = Components_Cluster(cluster=self.cl1,\n component=self.co3,\n name=\"Addition\")\n self.hc6 = Components_Cluster(cluster=self.cl3,\n component=self.co3,\n name=\"einfache Addition\")\n self.hc7 = Components_Cluster(cluster=self.cl1,\n component=self.co4,\n name=\"KomplexAddition\")\n\n self.hc1.save()\n self.hc2.save()\n self.hc3.save()\n self.hc4.save()\n self.hc5.save()\n self.hc6.save()\n self.hc7.save()\n\n self.p1 = Programmer(component=self.co1, email=\"foo@bah.de\")\n self.p2 = Programmer(component=self.co1, email=\"blabla@bla.de\")\n self.p3 = Programmer(component=self.co2, email=\"blubble@blub.org\")\n self.p4 = Programmer(component=self.co3, email=\"bah@foo.de\")\n self.p5 = Programmer(component=self.co4, email=\"bah@foo.de\")\n self.p6 = Programmer(component=self.co4, email=\"abc@xyz.de\")\n\n self.p1.save()\n self.p2.save()\n self.p3.save()\n self.p4.save()\n self.p5.save()\n self.p6.save()\n", "sub_path": "src/frontend/app/ctlweb/management/commands/demo_data.py", "file_name": "demo_data.py", "file_ext": "py", "file_size_in_byte": 7629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 8, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand.option_list", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 51, 
"usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "531234260", "text": "\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom scipy.stats import pearsonr\nfrom keras.layers import Dense, Activation\nfrom keras.models import Sequential\n\n# function encode() is used for doing the LabelEncode and OneHotEncode on the data\ndef encode(data):\n lab_en = LabelEncoder().fit(data)\n lab_en_data = lab_en.transform(data)\n\n onehot_en = OneHotEncoder(sparse=False).fit(lab_en_data.reshape(-1, 1))\n onehot_data = onehot_en.transform(lab_en_data.reshape(-1, 1))\n\n return onehot_data\n\n\ndef main():\n raw_train = pd.read_csv(\"E:/HKUST/5001_Data_Analytics/kaggle/data1028/train.csv\")\n raw_test = pd.read_csv(\"E:/HKUST/5001_Data_Analytics/kaggle/data1028/test.csv\")\n labels = raw_train['time']\n\n raw = raw_train.append(raw_test)\n raw.drop('id',axis=1, inplace=True)\n\n penalty = encode(raw['penalty'].values)\n # Select some useful features\n fea = ['l1_ratio', 'alpha', 'max_iter', 'random_state','n_jobs', 'n_samples', 'n_features',\n 'n_classes', 'n_clusters_per_class', 'n_informative', 'flip_y', 'scale']\n\n # Construct polynomial features based on the original features\n poly = PolynomialFeatures()\n poly_fea = poly.fit_transform(raw[fea])\n poly_fea_df = pd.DataFrame(dict(zip(poly.get_feature_names(),np.transpose(poly_fea))))\n\n # Select features according to their Pearson correlation coefficients\n R = []\n P = []\n poly_fea_df_fea_name = poly_fea_df.columns.values.tolist()\n for col_name in poly_fea_df_fea_name:\n r, p = pearsonr(poly_fea_df[col_name].iloc[:raw_train.shape[0]], labels)\n R.append(r)\n P.append(p)\n\n # Rank the features based on their Pearson correlation coefficients\n d = dict(zip(poly_fea_df_fea_name, R))\n d_sorted = sorted(d.items(), key=lambda item: abs(item[1]), reverse=True)\n pearsonr_fea = []\n\n for i in range(50):\n pearsonr_fea.append(d_sorted[i][0])\n\n piersen = pearsonr_fea.pop(0)\n fea_stand = StandardScaler().fit_transform(poly_fea_df[pearsonr_fea])\n\n # Select features according to their variances:var_thresh = p(1-p)\n # VT = VarianceThreshold(threshold=(.8 * (1 - .8)))\n # fea_stand = VT.fit_transform(fea_stand)\n\n all_fea = np.hstack((penalty, fea_stand))\n\n # split the whole dataset into train_fea data and test_fea data\n # and then split the train_fea into training data and testing data\n # test_fea data is used for prediction\n train_fea = all_fea[:raw_train.shape[0]]\n test_fea = all_fea[raw_train.shape[0]:]\n\n # Select features according to chi2 distribution\n # train_fea = SelectKBest(chi2, k=2).fit_transform(abs(train_fea), labels)\n\n\n model = Sequential()\n model.add(Dense(64, input_dim=53, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(1, activation='relu'))\n\n model.compile(loss='mse', optimizer='sgd')\n model.fit(train_fea, labels, epochs=1000, batch_size=400, verbose=0)\n model.evaluate(train_fea,labels)\n y_predict = model.predict(test_fea)\n y_predict_out = 
pd.DataFrame(y_predict)\n y_predict_out.to_csv(\"NNsubmission.csv\", index_label='Id', header = ['time'])\n\nif __name__ == '__main__':\n main()", "sub_path": "NN.py", "file_name": "NN.py", "file_ext": "py", "file_size_in_byte": 3447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.stats.pearsonr", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "33039174", "text": "from django.db.models.signals import post_save, post_delete\nfrom taskmng.models import Task\nfrom own_pusher import *\nfrom rest_framework.renderers import JSONRenderer\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom django.db.models.signals import m2m_changed\n\n\ndef pusher_worker(instance, created=None):\n pusher = MyPusher(u'131903',\n u'e749c59b174735416abe',\n u'e6ac5822e09619a965fd')\n task = Task.objects.filter(id=instance.id)\n assigned_to = task[0].assigned_to.all().values()\n task = task.values()[0]\n task['assigned_to'] = assigned_to if assigned_to else []\n task['owner'] = User.objects.filter(id=task['owner_id']).values()[0]\n if 'owner_id' in task:\n del task['owner_id']\n\n data = JSONRenderer().render({'method': 'save', 'task': task})\n data = unicode(data, 'utf-8')\n pusher.trigger(u'tasks-channel', u'tasks-changed', data)\n\n\n@receiver(m2m_changed, sender=Task.assigned_to.through)\ndef related_changed(sender, **kwargs):\n if kwargs['action'] == 'post_add':\n instance = kwargs['instance']\n pusher_worker(instance)\n pass\n\n\n@receiver(post_save, sender=Task)\ndef push_save(sender, instance, created=None, **kwargsm):\n pusher_worker(instance, created)\n\n\n@receiver(post_delete, sender=Task)\ndef push_delete(sender, instance, created=None, **kwargsm):\n pusher = MyPusher(u'131903',\n u'e749c59b174735416abe',\n u'e6ac5822e09619a965fd')\n data = JSONRenderer().render({'task': {'id': instance.id},\n 'method': 'delete'})\n data = unicode(data, 'utf-8')\n pusher.trigger(u'tasks-channel', u'tasks-changed', data)\n", "sub_path": "apps/taskmng/signals.py", "file_name": "signals.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "taskmng.models.Task.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "taskmng.models.Task.objects", 
"line_number": 14, "usage_type": "attribute"}, {"api_name": "taskmng.models.Task", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.renderers.JSONRenderer", "line_number": 22, "usage_type": "call"}, {"api_name": "django.dispatch.receiver", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models.signals.m2m_changed", "line_number": 27, "usage_type": "argument"}, {"api_name": "taskmng.models.Task.assigned_to", "line_number": 27, "usage_type": "attribute"}, {"api_name": "taskmng.models.Task", "line_number": 27, "usage_type": "name"}, {"api_name": "django.dispatch.receiver", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 35, "usage_type": "argument"}, {"api_name": "taskmng.models.Task", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.renderers.JSONRenderer", "line_number": 45, "usage_type": "call"}, {"api_name": "django.dispatch.receiver", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_delete", "line_number": 40, "usage_type": "argument"}, {"api_name": "taskmng.models.Task", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "106094462", "text": "#python maps also called chainmap is a type of data structure to manage multiple dicts together\n\n\n#creating a chainmap\nimport collections\n\ndict1 = {'day1': 'Mon', 'day2': 'Tue'}\ndict2 = {'day3': 'Wed', 'day4': 'Thu'}\n\nres = collections.ChainMap(dict1, dict2)\n\nprint(res.maps, '\\n')\n", "sub_path": "CodingInterviewPrep/Data Structures With Python/8-Maps.py", "file_name": "8-Maps.py", "file_ext": "py", "file_size_in_byte": 282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.ChainMap", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "526484871", "text": "##############################################################################\n#\n# Copyright (c) 2002-2006 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\n# Python std. 
lib\nimport logging\nimport sets\nimport sys\nfrom time import time\nfrom types import StringType\n\n# Other packages\nfrom ZODB.POSException import ConflictError\nfrom ZEO.Exceptions import ClientDisconnected\nfrom zExceptions import Unauthorized\nfrom ExtensionClass import Base\nfrom OFS.SimpleItem import SimpleItem\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom AccessControl.SecurityInfo import ClassSecurityInformation\nfrom AccessControl.Permissions \\\n import manage_zcatalog_entries, view_management_screens\nfrom OFS.SimpleItem import SimpleItem\nfrom BTrees.OOBTree import OOBTree\nfrom Products.PageTemplates.PageTemplateFile import PageTemplateFile\nfrom Globals import DTMLFile\nfrom Acquisition import Implicit, aq_base, aq_inner, aq_parent\n\n# Local\nfrom CatalogEventQueue import CatalogEventQueue, EVENT_TYPES, ADDED_EVENTS\nfrom CatalogEventQueue import ADDED, CHANGED, CHANGED_ADDED, REMOVED\nfrom CatalogEventQueue import SAFE_POLICY, ALTERNATIVE_POLICY\n\nlogger = logging.getLogger('event.QueueCatalog')\n\n_zcatalog_methods = {\n 'catalog_object': 1,\n 'uncatalog_object': 1,\n 'uniqueValuesFor': 1,\n 'getpath': 1,\n 'getrid': 1,\n 'getobject': 1,\n 'schema': 1,\n 'indexes': 1,\n 'index_objects': 1,\n 'searchResults': 1,\n '__call__': 1,\n 'refreshCatalog': 1,\n 'Indexes': 1,\n 'unrestrictedSearchResults': 1,\n 'manage_addIndex': 1,\n 'manage_addColumn': 1,\n 'manage_catalogClear': 1,\n 'getIndexObjects': 1,\n }\n\n_is_zcatalog_method = _zcatalog_methods.has_key\n\n_views = {}\n\n\nclass QueueConfigurationError(Exception):\n pass\n\n\nclass QueueCatalog(Implicit, SimpleItem):\n \"\"\"Queued ZCatalog (Proxy)\n\n A QueueCatalog delegates most requests to a ZCatalog that is named\n as part of the QueueCatalog configuration.\n\n Requests to catalog or uncatalog objects are queued. They must be\n processed by a separate process (or thread). The queuing provides\n benefits:\n\n - Content-management operations, performed by humans, complete\n much faster, this making the content-management system more\n effiecient for it's users.\n\n - Catalog updates are batched, which makes indexing much more\n efficient.\n\n - Indexing is performed by a single thread, allowing more\n effecient catalog document generation and avoiding conflict\n errors from occuring during indexing.\n\n - When used with ZEO, indexing might e performed on the same\n machine as the storage server, making updates faster.\n\n \"\"\"\n\n security = ClassSecurityInformation()\n\n _immediate_indexes = () # The names of indexes to update immediately\n _location = None\n _immediate_removal = 1 # Flag: don't queue removal\n _immediate_metadata_update = 1 # Flag: don't queue metadata creation\n _process_all_indexes = 0 # Flag: queue-process all, not just non-immediate,\n # indexes\n title = ''\n\n\n # When set, _v_catalog_cache is a tuple containing the wrapped ZCatalog\n # and the REQUEST it is bound to.\n _v_catalog_cache = None\n\n # As an alternative to the original queue conflict handling there is now\n # a policy which will reduce conflicts, but at the cost of possibly having\n # situations where items get cataloged unnecessarily. 
YMMV.\n _conflict_policy = SAFE_POLICY\n\n def __init__(self, buckets=1009, conflict_policy=SAFE_POLICY):\n self._buckets = buckets\n self._conflict_policy = conflict_policy\n self._clearQueues()\n\n def _clearQueues(self):\n self._queues = [ CatalogEventQueue(self.getConflictPolicy()) \n for i in range(self._buckets) ]\n\n def getTitle(self):\n return self.title\n\n security.declareProtected(view_management_screens, 'setLocation')\n def setLocation(self, location):\n if self._location is not None:\n try:\n self.process()\n except QueueConfigurationError:\n self._clearQueues()\n self._location = location\n\n security.declareProtected(view_management_screens, 'getIndexInfo')\n def getIndexInfo(self):\n try:\n c = self.getZCatalog()\n except QueueConfigurationError:\n return None\n else:\n items = [(ob.id, ob.meta_type) for ob in c.getIndexObjects()]\n items.sort()\n res = []\n for id, meta_type in items:\n res.append({'id': id, 'meta_type': meta_type})\n return res\n\n\n security.declareProtected(view_management_screens, 'getImmediateIndexes')\n def getImmediateIndexes(self):\n return self._immediate_indexes\n\n security.declareProtected(view_management_screens, 'setImmediateIndexes')\n def setImmediateIndexes(self, indexes):\n self._immediate_indexes = tuple(map(str, indexes))\n\n security.declareProtected(view_management_screens, 'getImmediateRemoval')\n def getImmediateRemoval(self):\n return self._immediate_removal\n\n security.declareProtected(view_management_screens, 'setImmediateRemoval')\n def setImmediateRemoval(self, flag):\n self._immediate_removal = bool(flag)\n\n security.declareProtected(view_management_screens,\n 'getImmediateMetadataUpdate')\n def getImmediateMetadataUpdate(self):\n return self._immediate_metadata_update\n\n security.declareProtected(view_management_screens,\n 'setImmediateMetadataUpdate')\n def setImmediateMetadataUpdate(self, flag):\n self._immediate_metadata_update = bool(flag)\n\n security.declareProtected(view_management_screens, 'getProcessAllIndexes')\n def getProcessAllIndexes(self):\n return self._process_all_indexes\n\n security.declareProtected(view_management_screens, 'setProcessAllIndexes')\n def setProcessAllIndexes(self, flag):\n self._process_all_indexes = bool(flag)\n\n security.declareProtected(view_management_screens, 'getBucketCount')\n def getBucketCount(self):\n return self._buckets\n\n security.declareProtected(view_management_screens, 'setBucketCount')\n def setBucketCount(self, count):\n if self._location:\n self.process()\n self._buckets = int(count)\n self._clearQueues()\n\n security.declareProtected(view_management_screens, 'getConflictPolicy')\n def getConflictPolicy(self):\n \"\"\" Return the currently-used conflict policy\n \"\"\"\n return self._conflict_policy\n\n security.declareProtected(view_management_screens, 'setConflictPolicy')\n def setConflictPolicy(self, policy=SAFE_POLICY):\n \"\"\" Set the conflic policy to be used\n \"\"\"\n try:\n policy = int(policy)\n except ValueError:\n return\n\n if ( policy in (SAFE_POLICY, ALTERNATIVE_POLICY) and\n policy != self.getConflictPolicy() ):\n self._conflict_policy = policy\n self._clearQueues()\n\n security.declareProtected(manage_zcatalog_entries, 'getZCatalog')\n def getZCatalog(self, method=''):\n ZC = None\n REQUEST = getattr(self, 'REQUEST', None)\n cache = self._v_catalog_cache\n if cache is not None:\n # The cached catalog may be wrapped with an earlier\n # request. Before using it, check the request.\n (ZC, req) = cache\n if req is not REQUEST:\n # It's an old wrapper. 
Discard.\n ZC = None\n\n if ZC is None:\n if self._location is None:\n raise QueueConfigurationError(\n \"This QueueCatalog hasn't been \"\n \"configured with a ZCatalog location.\"\n )\n parent = aq_parent(aq_inner(self))\n try:\n ZC = parent.unrestrictedTraverse(self._location)\n except (KeyError, AttributeError):\n raise QueueConfigurationError(\n \"ZCatalog not found at %s.\" % self._location\n )\n if not hasattr(ZC, 'getIndexObjects'): # XXX need a better check\n raise QueueConfigurationError(\n \"The object at %s does not implement the \"\n \"IZCatalog interface.\" % self._location\n )\n\n security_manager = getSecurityManager()\n if not security_manager.validate(self, self, self._location, ZC):\n raise Unauthorized(self._location, ZC)\n\n ZC = aq_base(ZC).__of__(parent)\n self._v_catalog_cache = (ZC, REQUEST)\n\n if method:\n if not _is_zcatalog_method(method):\n raise AttributeError(method)\n m = getattr(ZC, method)\n # Note that permission to access the method may be checked\n # later on. This isn't the right place to check permission.\n return m\n else:\n return ZC\n\n def __getattr__(self, name):\n # The original object must be wrapped, but self isn't, so\n # we return a special object that will do the attribute access\n # on a wrapped object.\n if _is_zcatalog_method(name):\n return AttrWrapper(name)\n\n raise AttributeError(name)\n\n def _update(self, uid, etype):\n t = time()\n self._queues[hash(uid) % self._buckets].update(uid, etype)\n\n security.declareProtected(manage_zcatalog_entries, 'catalog_object')\n def catalog_object(self, obj, uid=None, idxs=None, update_metadata=1):\n # update_metadata=0 is ignored if the queued catalog is set to\n # update metadata during queue processing, rather than immediately\n\n # similarly, limiting the idxs only limits the immediate indexes. If \n # any work needs to be done in the queue processing, it will all be\n # done: we have not implemented partial indexing during queue \n # processing. The only way to avoid any of it is to avoid all of it \n # (i.e., update metadata immediately and don't have any indexes to \n # update on the queued side).\n\n # Make sure the current context is allowed to do this:\n catalog_object = self.getZCatalog('catalog_object')\n\n if uid is None:\n uid = '/'.join(obj.getPhysicalPath())\n elif not isinstance(uid, StringType):\n uid = '/'.join(uid)\n\n catalog = self.getZCatalog()\n cat_indexes = sets.Set(catalog.indexes())\n immediate_indexes = sets.Set(self._immediate_indexes)\n cat_indexes -= immediate_indexes\n\n # The ZCatalog API doesn't allow us to distinguish between\n # adds and updates, so we have to try to figure this out\n # ourselves.\n\n # There's a risk of a race here. What if there is a previously\n # unprocessed add event? If so, then this should be a changed\n # event. If we undo this transaction later, we'll generate a\n # remove event, when we should generate an add changed event.\n # To avoid this, we need to make sure we see consistent values\n # of the event queue. We also need to avoid resolving\n # (non-undo) conflicts of add events. This will slow things\n # down a bit, but adds should be relatively infrequent.\n\n # Now, try to decide if the catalog has the uid (path).\n already_cataloged = cataloged(catalog, uid)\n if not already_cataloged:\n # Looks like we should add, but maybe there's already a\n # pending add event. 
We'd better check the event queue:\n already_cataloged = (\n self._queues[hash(uid) % self._buckets].getEvent(uid) in\n ADDED_EVENTS)\n\n if idxs and already_cataloged:\n # if not already_cataloged, we index the whole thing\n idxs = sets.Set(idxs)\n immediate_indexes.intersection_update(idxs)\n cat_indexes.intersection_update(idxs)\n\n immediate_metadata = self.getImmediateMetadataUpdate()\n if cat_indexes or update_metadata and not immediate_metadata:\n self._update(uid, already_cataloged and CHANGED or ADDED)\n\n if immediate_indexes:\n # Update some of the indexes immediately.\n catalog.catalog_object(\n obj, uid, immediate_indexes,\n update_metadata=update_metadata and immediate_metadata)\n elif update_metadata and immediate_metadata:\n # if it is added, no point in doing the metadata, and it will be\n # done in the queue process anyway\n catalog._catalog.updateMetadata(obj, uid)\n\n security.declareProtected(manage_zcatalog_entries, 'uncatalog_object')\n def uncatalog_object(self, uid):\n if not isinstance(uid, StringType):\n uid = '/'.join(uid)\n\n self._update(uid, REMOVED)\n\n if self._immediate_removal:\n self._process_queue( self._queues[hash(uid) % self._buckets]\n , limit=None\n )\n\n security.declareProtected(manage_zcatalog_entries, 'process')\n def process(self, max=None):\n \"\"\" Process pending events and return number of events processed. \"\"\"\n if not self.manage_size():\n return 0\n\n count = 0\n for queue in filter(None, self._queues):\n limit = None\n if max:\n # limit the number of events\n limit = max - count\n \n count += self._process_queue(queue, limit)\n\n if max and count >= max:\n # On reaching the maximum, return immediately\n # so the caller can commit the transaction,\n # sleep for a while, or do something else.\n break\n\n return count\n\n def _process_queue(self, queue, limit):\n \"\"\"Process a single queue\"\"\"\n catalog = self.getZCatalog()\n\n if self.getProcessAllIndexes():\n idxs = None\n else:\n cat_indexes = sets.Set(catalog.indexes())\n immediate_indexes = sets.Set(self._immediate_indexes)\n if not immediate_indexes or immediate_indexes==cat_indexes:\n idxs = None # do all of 'em\n else:\n idxs = list(cat_indexes - immediate_indexes)\n events = queue.process(limit)\n count = 0\n\n for uid, (t, event) in events.items():\n if event is REMOVED:\n try:\n if cataloged(catalog, uid):\n catalog.uncatalog_object(uid)\n except (ConflictError, ClientDisconnected):\n raise\n except:\n logger.error('error uncataloging object', exc_info=True)\n else:\n # add or change\n if event is CHANGED and not cataloged(catalog, uid):\n continue\n # Note that the uid may be relative to the catalog.\n obj = catalog.unrestrictedTraverse(uid, None)\n if obj is not None:\n immediate_metadata = self.getImmediateMetadataUpdate()\n try:\n catalog.catalog_object(\n obj, uid, idxs=idxs,\n update_metadata=not immediate_metadata)\n except (ConflictError, ClientDisconnected):\n raise\n except:\n logger.error('error cataloging object', exc_info=True)\n\n count = count + 1\n\n return count\n \n\n #\n # CMF catalog tool methods.\n #\n security.declarePrivate('indexObject')\n def indexObject(self, object):\n \"\"\"Add to catalog.\n \"\"\"\n self.catalog_object(object, self.uidForObject(object))\n\n security.declarePrivate('unindexObject')\n def unindexObject(self, object):\n \"\"\"Remove from catalog.\n \"\"\"\n self.uncatalog_object(self.uidForObject(object))\n\n security.declarePrivate('reindexObject')\n def reindexObject(self, object, idxs=None,update_metadata=1,uid=None):\n 
\"\"\"Update catalog after object data has changed.\n\n The optional idxs argument is a list of specific indexes\n to update (all of them by default).\n \"\"\"\n self.catalog_object(object, uid or self.uidForObject(object), idxs=idxs,\n update_metadata=update_metadata)\n\n security.declarePrivate('uidForObject')\n def uidForObject(self, obj):\n \"\"\"Get a catalog uid for the object. Allows the underlying catalog\n to determine the uids if it implements this method\"\"\"\n catalog = self.getZCatalog()\n if hasattr(aq_base(catalog), 'uidForObject'):\n return catalog.uidForObject(obj)\n return '/'.join(obj.getPhysicalPath())\n\n # Provide web pages. It would be nice to use views, but Zope 2.6\n # just isn't ready for views. :( In particular, we'd have to fake\n # out the PageTemplateFiles in some brittle way to make them do\n # the right thing. :(\n\n security.declareProtected(view_management_screens, 'manage_editForm')\n manage_editForm = PageTemplateFile('www/edit', globals())\n\n security.declareProtected(view_management_screens, 'manage_getLocation')\n def manage_getLocation(self):\n return self._location or ''\n\n security.declareProtected(view_management_screens, 'manage_edit')\n def manage_edit(self, title='', location='', immediate_indexes=(),\n immediate_removal=0, bucket_count=0, immediate_metadata=0,\n all_indexes=0, conflict_policy=SAFE_POLICY, RESPONSE=None):\n \"\"\" Edit the instance \"\"\"\n self.title = title\n self.setLocation(location or None)\n self.setImmediateIndexes(immediate_indexes)\n self.setImmediateRemoval(immediate_removal)\n self.setImmediateMetadataUpdate(immediate_metadata)\n self.setProcessAllIndexes(all_indexes)\n self.setConflictPolicy(conflict_policy)\n if bucket_count:\n bucket_count = int(bucket_count)\n if bucket_count != self.getBucketCount():\n self.setBucketCount(bucket_count)\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_editForm?manage_tabs_message='\n 'Properties+changed' % self.absolute_url())\n\n\n security.declareProtected(manage_zcatalog_entries,\n 'list_queue_items')\n def list_queue_items(self, limit=100):\n \"\"\"Return a list of items in the queue.\"\"\"\n items = []\n count = 0\n for queue in filter(None, self._queues):\n qitems = queue._data.keys()\n count += len(qitems)\n items += qitems\n if limit is not None:\n if count > limit:\n items = items[:limit]\n return items\n\n\n security.declareProtected(manage_zcatalog_entries, 'manage_queue')\n manage_queue = DTMLFile('dtml/queue', globals())\n\n security.declareProtected(manage_zcatalog_entries, 'manage_size')\n def manage_size(self):\n size = 0\n for q in self._queues:\n size += len(q)\n\n return size\n\n security.declareProtected(manage_zcatalog_entries, 'manage_process')\n def manage_process(self, count=100, REQUEST=None):\n \"Web UI to manually process queues\"\n count = int(count)\n processed = self.process(max=count)\n if REQUEST is not None:\n msg = '%i Queue item(s) processed' % processed\n return self.manage_queue(manage_tabs_message=msg)\n else:\n return processed\n\n # Provide Zope 2 offerings\n\n index_html = None\n\n meta_type = 'ZCatalog Queue'\n\n manage_options=(\n (\n {'label': 'Configure', 'action': 'manage_editForm',\n 'help':('QueueCatalog','QueueCatalog-Configure.stx')},\n\n {'label': 'Queue', 'action': 'manage_queue',\n 'help':('QueueCatalog','QueueCatalog-Queue.stx')},\n )\n +SimpleItem.manage_options\n )\n\n security.declareObjectPublic()\n # Disallow access to subobjects with no security assertions.\n security.setDefaultAccess('deny')\n\n 
security.declarePublic('getTitle', 'title_or_id')\n\n security.declareProtected(manage_zcatalog_entries,\n 'catalog_object', 'uncatalog_object')\n\n\ndef cataloged(catalog, path):\n getrid = getattr(catalog, 'getrid', None)\n if getrid is None:\n\n # This is an old catalog that doesn't provide an API for\n # getting an objects rid (and thus determing that the\n # object is already cataloged.\n\n # We'll just use our knowledge of the internal structure.\n\n rid = catalog._catalog.uids.get(path)\n\n else:\n rid = catalog.getrid(path)\n\n return rid is not None\n\nclass AttrWrapper(Base):\n \"Special object that allowes us to use acquisition in QueueCatalog \"\n \"attribute access\"\n\n def __init__(self, name):\n self.__name__ = name\n\n def __of__(self, wrappedQueueCatalog):\n return wrappedQueueCatalog.getZCatalog(self.__name__)\n\n__doc__ = QueueCatalog.__doc__ + __doc__\n\n", "sub_path": "Products.QueueCatalog/trunk/Products/QueueCatalog/QueueCatalog.py", "file_name": "QueueCatalog.py", "file_ext": "py", "file_size_in_byte": 21734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 45, "usage_type": "call"}, {"api_name": "Acquisition.Implicit", "line_number": 77, "usage_type": "name"}, {"api_name": "OFS.SimpleItem.SimpleItem", "line_number": 77, "usage_type": "name"}, {"api_name": "AccessControl.SecurityInfo.ClassSecurityInformation", "line_number": 103, "usage_type": "call"}, {"api_name": "CatalogEventQueue.SAFE_POLICY", "line_number": 121, "usage_type": "name"}, {"api_name": "CatalogEventQueue.SAFE_POLICY", "line_number": 123, "usage_type": "name"}, {"api_name": "CatalogEventQueue.CatalogEventQueue", "line_number": 129, "usage_type": "call"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 135, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 144, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 159, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 163, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 167, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 171, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 175, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 180, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 185, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 189, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 193, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 197, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 204, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 210, "usage_type": "argument"}, {"api_name": "CatalogEventQueue.SAFE_POLICY", "line_number": 211, "usage_type": "name"}, {"api_name": "CatalogEventQueue.SAFE_POLICY", "line_number": 219, "usage_type": "name"}, 
{"api_name": "CatalogEventQueue.ALTERNATIVE_POLICY", "line_number": 219, "usage_type": "name"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 224, "usage_type": "argument"}, {"api_name": "Acquisition.aq_parent", "line_number": 243, "usage_type": "call"}, {"api_name": "Acquisition.aq_inner", "line_number": 243, "usage_type": "call"}, {"api_name": "AccessControl.SecurityManagement.getSecurityManager", "line_number": 256, "usage_type": "call"}, {"api_name": "zExceptions.Unauthorized", "line_number": 258, "usage_type": "call"}, {"api_name": "Acquisition.aq_base", "line_number": 260, "usage_type": "call"}, {"api_name": "time.time", "line_number": 283, "usage_type": "call"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 286, "usage_type": "argument"}, {"api_name": "types.StringType", "line_number": 303, "usage_type": "argument"}, {"api_name": "sets.Set", "line_number": 307, "usage_type": "call"}, {"api_name": "sets.Set", "line_number": 308, "usage_type": "call"}, {"api_name": "CatalogEventQueue.ADDED_EVENTS", "line_number": 331, "usage_type": "name"}, {"api_name": "sets.Set", "line_number": 335, "usage_type": "call"}, {"api_name": "CatalogEventQueue.CHANGED", "line_number": 341, "usage_type": "name"}, {"api_name": "CatalogEventQueue.ADDED", "line_number": 341, "usage_type": "name"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 353, "usage_type": "argument"}, {"api_name": "types.StringType", "line_number": 355, "usage_type": "argument"}, {"api_name": "CatalogEventQueue.REMOVED", "line_number": 358, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 365, "usage_type": "argument"}, {"api_name": "sets.Set", "line_number": 395, "usage_type": "call"}, {"api_name": "sets.Set", "line_number": 396, "usage_type": "call"}, {"api_name": "CatalogEventQueue.REMOVED", "line_number": 405, "usage_type": "name"}, {"api_name": "ZODB.POSException.ConflictError", "line_number": 409, "usage_type": "name"}, {"api_name": "ZEO.Exceptions.ClientDisconnected", "line_number": 409, "usage_type": "name"}, {"api_name": "CatalogEventQueue.CHANGED", "line_number": 415, "usage_type": "name"}, {"api_name": "ZODB.POSException.ConflictError", "line_number": 425, "usage_type": "name"}, {"api_name": "ZEO.Exceptions.ClientDisconnected", "line_number": 425, "usage_type": "name"}, {"api_name": "Acquisition.aq_base", "line_number": 465, "usage_type": "call"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 474, "usage_type": "argument"}, {"api_name": "Products.PageTemplates.PageTemplateFile.PageTemplateFile", "line_number": 475, "usage_type": "call"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 477, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.view_management_screens", "line_number": 481, "usage_type": "argument"}, {"api_name": "CatalogEventQueue.SAFE_POLICY", "line_number": 484, "usage_type": "name"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 503, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 519, "usage_type": "argument"}, {"api_name": "Globals.DTMLFile", "line_number": 520, "usage_type": "call"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 522, "usage_type": "argument"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", 
"line_number": 530, "usage_type": "argument"}, {"api_name": "OFS.SimpleItem.SimpleItem.manage_options", "line_number": 555, "usage_type": "attribute"}, {"api_name": "OFS.SimpleItem.SimpleItem", "line_number": 555, "usage_type": "name"}, {"api_name": "AccessControl.Permissions.manage_zcatalog_entries", "line_number": 564, "usage_type": "argument"}, {"api_name": "ExtensionClass.Base", "line_number": 585, "usage_type": "name"}]} +{"seq_id": "31537382", "text": "# 287. Find the Duplicate Number\n\n# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),\n# prove that at least one duplicate number must exist.\n# Assume that there is only one duplicate number, find the duplicate one.\n\n# Example 1:\n\n# Input: [1,3,4,2,2]\n# Output: 2\n# Example 2:\n\n# Input: [3,1,3,4,2]\n# Output: 3\n# Note:\n\n# You must not modify the array (assume the array is read only).\n# You must use only constant, O(1) extra space.\n# Your runtime complexity should be less than O(n2).\n# There is only one duplicate number in the array, but it could be repeated more than once.\nfrom typing import List\n\n\nclass Solution:\n def findDuplicate(self, nums: List[int]) -> int:\n fast = 0\n slow = 0\n while 1:\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow == fast:\n break\n result = 0\n while result != slow:\n result = nums[result]\n slow = nums[slow]\n return slow\n\n\nif __name__ == \"__main__\":\n from util import Test\n\n s = Solution()\n t = Test(s.findDuplicate)\n t.equal(2, [1, 3, 4, 2, 2])\n t.equal(3, [3, 1, 3, 4, 2])\n", "sub_path": "Python/287.py", "file_name": "287.py", "file_ext": "py", "file_size_in_byte": 1189, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "util.Test", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "313319323", "text": "# -*- coding: utf-8 -*-\n\n# FLO-2D Preprocessor tools for QGIS\n# Copyright © 2021 Lutra Consulting for FLO-2D\n\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version\nimport datetime\nimport os\nimport sys\nimport math\nimport uuid\nfrom qgis.PyQt.QtWidgets import QMessageBox, QApplication, QProgressDialog\nfrom qgis.PyQt.QtCore import Qt\nfrom collections import defaultdict\nfrom subprocess import Popen, PIPE, STDOUT\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.core import (\n QgsFeature,\n QgsGeometry,\n QgsPointXY,\n QgsSpatialIndex,\n QgsRasterLayer,\n QgsRaster,\n QgsFeatureRequest,\n QgsRectangle,\n QgsFeedback,\n NULL,\n QgsMarkerSymbol,\n QgsProject ,\n QgsSymbol,\n QgsRendererCategory,\n QgsCategorizedSymbolRenderer,\n QgsRendererRange,\n QgsGraduatedSymbolRenderer\n \n)\n\n\nfrom qgis.analysis import QgsInterpolator, QgsTinInterpolator, QgsZonalStatistics\nfrom ..gui.ui_utils import center_canvas, zoom_show_n_cells\nfrom ..utils import is_number, get_file_path, grid_index, get_grid_index, set_grid_index\nfrom ..errors import GeometryValidityErrors, Flo2dError\n\nimport numpy as np\n\ncellIDNumpyArray = None\nxvalsNumpyArray = None\nyvalsNumpyArray = None\n\ncellElevNumpyArray = None\n\n# GRID classes\nclass TINInterpolator(object):\n def __init__(self, point_lyr, field_name):\n self.lyr = point_lyr\n self.field_name = field_name\n self.lyr_data = None\n 
self.interpolator = None\n\n def setup_layer_data(self):\n index = self.lyr.fields().lookupField(self.field_name)\n self.lyr_data = QgsInterpolator.LayerData()\n self.lyr_data.interpolationAttribute = index\n self.lyr_data.source = self.lyr\n self.lyr_data.sourceType = 0\n self.lyr_data.useZValue = False\n self.interpolator = QgsTinInterpolator([self.lyr_data])\n\n def tin_at_xy(self, x, y):\n feedback = QgsFeedback()\n success, value = self.interpolator.interpolatePoint(x, y, feedback)\n return success, value\n\n\nclass ZonalStatistics(object):\n def __init__(self, gutils, grid_lyr, point_lyr, field_name, calculation_type, search_distance=0):\n self.gutils = gutils\n self.grid = grid_lyr\n self.points = point_lyr\n self.field = field_name\n self.calculation_type = calculation_type\n self.search_distance = search_distance\n self.uid = uuid.uuid4()\n self.points_feats = None\n self.points_index = None\n self.calculation_method = None\n self.gap_raster = None\n self.filled_raster = None\n self.tmp = os.environ[\"TMP\"]\n self.setup_probing()\n\n @staticmethod\n def calculate_mean(vals):\n result = sum(vals) / len(vals)\n return result\n\n @staticmethod\n def calculate_max(vals):\n result = max(vals)\n return result\n\n @staticmethod\n def calculate_min(vals):\n result = min(vals)\n return result\n\n def setup_probing(self):\n self.points_feats, self.points_index = spatial_index(self.points)\n if self.calculation_type == \"Mean\":\n self.calculation_method = self.calculate_mean\n elif self.calculation_type == \"Max\":\n self.calculation_method = self.calculate_max\n elif self.calculation_type == \"Min\":\n self.calculation_method = self.calculate_min\n self.gap_raster = os.path.join(self.tmp, \"gap_raster_{0}.tif\".format(self.uid))\n self.filled_raster = os.path.join(self.tmp, \"filled_raster_{0}.tif\".format(self.uid))\n self.gutils.execute(\"UPDATE grid SET elevation = NULL;\")\n\n def remove_rasters(self):\n try:\n os.remove(self.gap_raster)\n os.remove(self.filled_raster)\n except OSError as e:\n pass\n\n def points_elevation(self):\n \"\"\"\n Method for calculating grid cell values from point layer.\n \"\"\"\n for feat in self.grid.getFeatures():\n geom = feat.geometry()\n geos_geom = QgsGeometry.createGeometryEngine(geom.constGet())\n geos_geom.prepareGeometry()\n fids = self.points_index.intersects(geom.boundingBox())\n points = []\n for fid in fids:\n point_feat = self.points_feats[fid]\n other_geom = point_feat.geometry()\n isin = geos_geom.intersects(other_geom.constGet())\n if isin is True:\n points.append(point_feat[self.field])\n else:\n pass\n try:\n yield round(self.calculation_method(points), 4), feat[\"fid\"]\n except (ValueError, ZeroDivisionError) as e:\n pass\n\n def rasterize_grid(self):\n grid_extent = self.grid.extent()\n corners = (grid_extent.xMinimum(), grid_extent.yMinimum(), grid_extent.xMaximum(), grid_extent.yMaximum())\n\n command = \"gdal_rasterize\"\n field = \"-a elevation\"\n rtype = \"-ot Float64\"\n rformat = \"-of GTiff\"\n extent = \"-te {0} {1} {2} {3}\".format(*corners)\n res = \"-tr {0} {0}\".format(self.gutils.get_cont_par(\"CELLSIZE\"))\n nodata = \"-a_nodata NULL\"\n compress = \"-co COMPRESS=LZW\"\n predictor = \"-co PREDICTOR=1\"\n vlayer = \"-l grid\"\n gpkg = '\"{0}\"'.format(self.grid.source().split(\"|\")[0])\n raster = '\"{0}\"'.format(self.gap_raster)\n\n parameters = (command, field, rtype, rformat, extent, res, nodata, compress, predictor, vlayer, gpkg, raster)\n cmd = \" \".join(parameters)\n success = False\n loop = 0\n out = None\n 
while success is False:\n proc = Popen(cmd, shell=True, stdin=open(os.devnull), stdout=PIPE, stderr=STDOUT, universal_newlines=True)\n out = proc.communicate()\n if os.path.exists(self.gap_raster):\n success = True\n else:\n loop += 1\n if loop > 3:\n raise Exception\n return cmd, out\n\n def fill_nodata(self):\n search = \"-md {0}\".format(self.search_distance) if self.search_distance > 0 else \"\"\n cmd = 'gdal_fillnodata {0} \"{1}\" \"{2}\"'.format(search, self.gap_raster, self.filled_raster)\n proc = Popen(cmd, shell=True, stdin=open(os.devnull), stdout=PIPE, stderr=STDOUT, universal_newlines=True)\n out = proc.communicate()\n return cmd, out\n\n def null_elevation(self):\n req = QgsFeatureRequest().setFilterExpression('\"elevation\" IS NULL')\n elev_fid = raster2grid(self.grid, self.filled_raster, request=req)\n return elev_fid\n\n def set_elevation(self, elev_fid):\n \"\"\"\n Setting elevation values inside 'grid' table.\n \"\"\"\n set_qry = \"UPDATE grid SET elevation = ? WHERE fid = ?;\"\n cur = self.gutils.con.cursor()\n for el, fid in elev_fid:\n cur.execute(set_qry, (el, fid))\n self.gutils.con.commit()\n\n\nclass ZonalStatisticsOther(object):\n def __init__(self, gutils, grid_lyr, grid_field, point_lyr, field_name, calculation_type, search_distance=0):\n self.gutils = gutils\n self.grid = grid_lyr\n self.points = point_lyr\n self.grid_field = grid_field\n self.field = field_name\n self.calculation_type = calculation_type\n self.search_distance = search_distance\n self.uid = uuid.uuid4()\n self.points_feats = None\n self.points_index = None\n self.calculation_method = None\n self.gap_raster = None\n self.filled_raster = None\n self.tmp = os.environ[\"TMP\"]\n self.setup_probing()\n\n @staticmethod\n def calculate_mean(vals):\n result = sum(vals) / len(vals)\n return result\n\n @staticmethod\n def calculate_max(vals):\n result = max(vals)\n return result\n\n @staticmethod\n def calculate_min(vals):\n result = min(vals)\n return result\n\n def setup_probing(self):\n self.points_feats, self.points_index = spatial_index(self.points)\n if self.calculation_type == \"Mean\":\n self.calculation_method = self.calculate_mean\n elif self.calculation_type == \"Max\":\n self.calculation_method = self.calculate_max\n elif self.calculation_type == \"Min\":\n self.calculation_method = self.calculate_min\n self.gap_raster = os.path.join(self.tmp, \"gap_raster_{0}.tif\".format(self.uid))\n self.filled_raster = os.path.join(self.tmp, \"filled_raster_{0}.tif\".format(self.uid))\n\n if self.grid_field == \"water_elevation\":\n self.gutils.execute(\"UPDATE grid SET water_elevation = NULL;\")\n elif self.grid_field == \"flow_depth\":\n self.gutils.execute(\"UPDATE grid SET flow_depth = NULL;\")\n\n def remove_rasters(self):\n try:\n os.remove(self.gap_raster)\n os.remove(self.filled_raster)\n except OSError as e:\n pass\n\n def points_elevation(self):\n \"\"\"\n Method for calculating grid cell values from point layer.\n \"\"\"\n for feat in self.grid.getFeatures():\n geom = feat.geometry()\n geos_geom = QgsGeometry.createGeometryEngine(geom.constGet())\n geos_geom.prepareGeometry()\n fids = self.points_index.intersects(geom.boundingBox())\n points = []\n for fid in fids:\n point_feat = self.points_feats[fid]\n other_geom = point_feat.geometry()\n isin = geos_geom.intersects(other_geom.constGet())\n if isin is True:\n points.append(point_feat[self.field])\n else:\n pass\n try:\n yield round(self.calculation_method(points), 4), feat[\"fid\"]\n except (ValueError, ZeroDivisionError) as e:\n pass\n\n def 
rasterize_grid(self):\n grid_extent = self.grid.extent()\n corners = (grid_extent.xMinimum(), grid_extent.yMinimum(), grid_extent.xMaximum(), grid_extent.yMaximum())\n\n command = \"gdal_rasterize\"\n field = \"-a elevation\"\n rtype = \"-ot Float64\"\n rformat = \"-of GTiff\"\n extent = \"-te {0} {1} {2} {3}\".format(*corners)\n res = \"-tr {0} {0}\".format(self.gutils.get_cont_par(\"CELLSIZE\"))\n nodata = \"-a_nodata NULL\"\n compress = \"-co COMPRESS=LZW\"\n predictor = \"-co PREDICTOR=1\"\n vlayer = \"-l grid\"\n gpkg = '\"{0}\"'.format(self.grid.source().split(\"|\")[0])\n raster = '\"{0}\"'.format(self.gap_raster)\n\n parameters = (command, field, rtype, rformat, extent, res, nodata, compress, predictor, vlayer, gpkg, raster)\n cmd = \" \".join(parameters)\n success = False\n loop = 0\n out = None\n while success is False:\n proc = Popen(cmd, shell=True, stdin=open(os.devnull), stdout=PIPE, stderr=STDOUT, universal_newlines=True)\n out = proc.communicate()\n if os.path.exists(self.gap_raster):\n success = True\n else:\n loop += 1\n if loop > 3:\n raise Exception\n return cmd, out\n\n def fill_nodata(self):\n search = \"-md {0}\".format(self.search_distance) if self.search_distance > 0 else \"\"\n cmd = 'gdal_fillnodata {0} \"{1}\" \"{2}\"'.format(search, self.gap_raster, self.filled_raster)\n proc = Popen(cmd, shell=True, stdin=open(os.devnull), stdout=PIPE, stderr=STDOUT, universal_newlines=True)\n out = proc.communicate()\n return cmd, out\n\n def null_elevation(self):\n req = QgsFeatureRequest().setFilterExpression('\"water_elevation\" IS NULL')\n elev_fid = raster2grid(self.grid, self.filled_raster, request=req)\n return elev_fid\n\n def set_other(self, elev_fid):\n \"\"\"\n Setting values inside 'grid' table.\n \"\"\"\n if self.grid_field == \"water_elevation\":\n set_qry = \"UPDATE grid SET water_elevation = ? WHERE fid = ?;\"\n elif self.grid_field == \"flow_depth\":\n set_qry = \"UPDATE grid SET flow_depth = ? 
WHERE fid = ?;\"\n\n cur = self.gutils.con.cursor()\n for el, fid in elev_fid:\n cur.execute(set_qry, (el, fid))\n self.gutils.con.commit()\n\n\ndef debugMsg(msg_string):\n msgBox = QMessageBox()\n msgBox.setText(msg_string)\n msgBox.exec_()\n\n\ndef show_error(msg):\n exc_type, exc_obj, exc_tb = sys.exc_info()\n filename = exc_tb.tb_frame.f_code.co_filename\n function = exc_tb.tb_frame.f_code.co_name\n line = str(exc_tb.tb_lineno)\n ms_box = QMessageBox(\n QMessageBox.Critical,\n \"Error\",\n msg\n + \"\\n\\n\"\n + \"Error:\\n \"\n + str(exc_obj)\n + \"\\n\\n\"\n + \"In file:\\n \"\n + filename\n + \"\\n\\n\"\n + \"In function:\\n \"\n + function\n + \"\\n\\n\"\n + \"On line \"\n + line,\n )\n ms_box.exec_()\n ms_box.show()\n\n\ndef polygons_statistics(vlayer, rlayer, statistics):\n zonalstats = QgsZonalStatistics(vlayer, rlayer, \"\", 1, statistics)\n res = zonalstats.calculateStatistics(None)\n return res\n\n\n# GRID functions\ndef spatial_index(vlayer, request=None):\n \"\"\"\n Creating spatial index over collection of features.\n \"\"\"\n allfeatures = {}\n index = QgsSpatialIndex()\n for feat in vlayer.getFeatures() if request is None else vlayer.getFeatures(request):\n feat_copy = QgsFeature(feat)\n allfeatures[feat.id()] = feat_copy\n index.insertFeature(feat_copy)\n return allfeatures, index\n\n\ndef spatial_centroids_index(vlayer, request=None):\n \"\"\"\n Creating spatial index over collection of features centroids.\n \"\"\"\n allfeatures = {}\n index = QgsSpatialIndex()\n for feat in vlayer.getFeatures() if request is None else vlayer.getFeatures(request):\n feat_copy = QgsFeature(feat)\n feat_copy.setGeometry(feat_copy.geometry().centroid())\n allfeatures[feat.id()] = feat_copy\n index.insertFeature(feat_copy)\n return allfeatures, index\n\n\ndef intersection_spatial_index(vlayer, request=None):\n \"\"\"\n Creating optimized for intersections spatial index over collection of features.\n \"\"\"\n allfeatures = {}\n index = QgsSpatialIndex()\n max_fid = max(vlayer.allFeatureIds()) + 1\n for feat in vlayer.getFeatures() if request is None else vlayer.getFeatures(request):\n geom = feat.geometry()\n if not geom.isGeosValid():\n geom = geom.buffer(0.0, 5)\n if not geom.isGeosValid():\n error_messages = [\n \"{ge.what()} at location: {ge.where().toString()}\"\n for ge in geom.validateGeometry(method=QgsGeometry.ValidatorGeos)\n ]\n raise GeometryValidityErrors(\"\\n\".join(error_messages))\n new_geoms = divide_geom(geom)\n new_fid = True if len(new_geoms) > 1 else False\n for g in new_geoms:\n engine = QgsGeometry.createGeometryEngine(g.constGet())\n engine.prepareGeometry()\n feat_copy = QgsFeature(feat)\n feat_copy.setGeometry(g)\n if new_fid is True:\n fid = max_fid\n feat_copy.setId(fid)\n max_fid += 1\n else:\n fid = feat.id()\n allfeatures[fid] = (feat_copy, engine)\n index.insertFeature(feat_copy)\n\n return allfeatures, index\n\ndef count_polygon_vertices(geom):\n \"\"\"\n Function for counting polygon vertices.\n \"\"\"\n c = sum(1 for _ in geom.vertices())\n return c\n\n\ndef divide_geom(geom, threshold=1000):\n \"\"\"\n Recursive function for dividing complex polygons into smaller chunks using geometry bounding box.\n \"\"\"\n if count_polygon_vertices(geom) <= threshold:\n return [geom]\n bbox = geom.boundingBox()\n center_x, center_y = bbox.center()\n xmin, ymin = bbox.xMinimum(), bbox.yMinimum()\n xmax, ymax = bbox.xMaximum(), bbox.yMaximum()\n center_point = QgsPointXY(center_x, center_y)\n s1 = QgsGeometry.fromPolygonXY(\n [[center_point, QgsPointXY(center_x, ymin), 
QgsPointXY(xmin, ymin), QgsPointXY(xmin, center_y), center_point]]\n )\n s2 = QgsGeometry.fromPolygonXY(\n [[center_point, QgsPointXY(xmin, center_y), QgsPointXY(xmin, ymax), QgsPointXY(center_x, ymax), center_point]]\n )\n s3 = QgsGeometry.fromPolygonXY(\n [[center_point, QgsPointXY(center_x, ymax), QgsPointXY(xmax, ymax), QgsPointXY(xmax, center_y), center_point]]\n )\n s4 = QgsGeometry.fromPolygonXY(\n [[center_point, QgsPointXY(xmax, center_y), QgsPointXY(xmax, ymin), QgsPointXY(center_x, ymin), center_point]]\n )\n\n new_geoms = []\n for s in [s1, s2, s3, s4]:\n part = geom.intersection(s)\n if part.isEmpty():\n continue\n if part.isMultipart():\n single_geoms = [QgsGeometry.fromPolygonXY(g) for g in part.asMultiPolygon()]\n for sg in single_geoms:\n new_geoms += divide_geom(sg, threshold)\n continue\n count = count_polygon_vertices(part)\n if count <= threshold:\n new_geoms.append(part)\n else:\n new_geoms += divide_geom(part, threshold)\n return new_geoms\n\n\ndef build_grid(boundary, cell_size, upper_left_coords=None):\n \"\"\"\n Generator which creates grid with given cell size and inside given boundary layer.\n \"\"\"\n half_size = cell_size * 0.5\n biter = boundary.getFeatures()\n feature = next(biter)\n geom = feature.geometry()\n bbox = geom.boundingBox()\n xmin = bbox.xMinimum()\n xmax = bbox.xMaximum()\n ymax = bbox.yMaximum()\n ymin = bbox.yMinimum()\n # xmin = math.floor(bbox.xMinimum())\n # xmax = math.ceil(bbox.xMaximum())\n # ymax = math.ceil(bbox.yMaximum())\n # ymin = math.floor(bbox.yMinimum())\n if upper_left_coords:\n xmin,ymax = upper_left_coords \n cols = int(math.ceil(abs(xmax - xmin) / cell_size))\n rows = int(math.ceil(abs(ymax - ymin) / cell_size))\n x = xmin + half_size\n y = ymax - half_size\n geos_geom_engine = QgsGeometry.createGeometryEngine(geom.constGet())\n geos_geom_engine.prepareGeometry()\n for col in range(cols):\n y_tmp = y\n for row in range(rows):\n pnt = QgsGeometry.fromPointXY(QgsPointXY(x, y_tmp))\n if geos_geom_engine.intersects(pnt.constGet()):\n poly = (\n x - half_size,\n y_tmp - half_size,\n x + half_size,\n y_tmp - half_size,\n x + half_size,\n y_tmp + half_size,\n x - half_size,\n y_tmp + half_size,\n x - half_size,\n y_tmp - half_size,\n )\n yield poly\n else:\n pass\n y_tmp -= cell_size\n x += cell_size\n\ndef build_grid_and_tableColRow(boundary, cell_size):\n \"\"\"\n Generator which creates grid with given cell size and inside given boundary layer.\n \"\"\"\n half_size = cell_size * 0.5\n biter = boundary.getFeatures()\n feature = next(biter)\n geom = feature.geometry()\n bbox = geom.boundingBox()\n xmin = bbox.xMinimum()\n xmax = bbox.xMaximum()\n ymax = bbox.yMaximum()\n ymin = bbox.yMinimum()\n # xmin = math.floor(bbox.xMinimum())\n # xmax = math.ceil(bbox.xMaximum())\n # ymax = math.ceil(bbox.yMaximum())\n # ymin = math.floor(bbox.yMinimum()) \n cols = int(math.ceil(abs(xmax - xmin) / cell_size))\n rows = int(math.ceil(abs(ymax - ymin) / cell_size))\n x = xmin + half_size\n y = ymax - half_size\n geos_geom_engine = QgsGeometry.createGeometryEngine(geom.constGet())\n geos_geom_engine.prepareGeometry()\n for col in range(cols):\n y_tmp = y\n for row in range(rows):\n pnt = QgsGeometry.fromPointXY(QgsPointXY(x, y_tmp))\n if geos_geom_engine.intersects(pnt.constGet()):\n poly = (\n x - half_size,\n y_tmp - half_size,\n x + half_size,\n y_tmp - half_size,\n x + half_size,\n y_tmp + half_size,\n x - half_size,\n y_tmp + half_size,\n x - half_size,\n y_tmp - half_size,\n )\n yield (poly, col + 2, abs(row - rows) + 1)\n else:\n pass\n 
y_tmp -= cell_size\n x += cell_size\n\n\ndef assign_col_row_indexes_to_grid(grid, gutils):\n cell_size = float(gutils.get_cont_par(\"CELLSIZE\"))\n ext = grid.extent()\n xmin = ext.xMinimum()\n ymin = ext.yMinimum() \n qry = \"UPDATE grid SET col = ?, row = ? WHERE fid = ?\"\n qry_values = []\n for i, cell in enumerate(grid.getFeatures(), 1):\n geom = cell.geometry()\n xx, yy = geom.centroid().asPoint()\n col = int((xx - xmin)/cell_size) + 2\n row = int((yy - ymin)/cell_size) + 2 \n qry_values.append((col, row, i))\n \n cur = gutils.con.cursor()\n cur.executemany(qry, qry_values)\n gutils.con.commit() \n \ndef poly2grid(grid, polygons, request, use_centroids, get_fid, get_grid_geom, threshold, *columns):\n \"\"\"\n Generator for assigning values from any polygon layer to target grid layer.\n \"\"\"\n try:\n grid_feats = grid.getFeatures()\n first = next(grid_feats)\n grid_area = first.geometry().area()\n except StopIteration:\n return\n\n if use_centroids is True:\n\n def geos_compare(geos1, geos2):\n return True\n\n else:\n\n def geos_compare(geos1, geos2):\n inter_area = geos1.intersection(geos2).area()\n if inter_area / grid_area < threshold:\n return False\n else:\n return True\n\n if get_grid_geom is True:\n\n def default_geom(geom):\n return [geom]\n\n else:\n\n def default_geom(geom):\n return []\n\n if get_fid is True:\n\n def default_value(feat_id):\n return [feat_id]\n\n else:\n\n def default_value(feat_id):\n return []\n\n allfeatures, index = spatial_centroids_index(grid) if use_centroids is True else spatial_index(grid)\n polygon_features = polygons.getFeatures() if request is None else polygons.getFeatures(request)\n\n for feat in polygon_features:\n fid = feat.id()\n geom = feat.geometry()\n geos_geom_engine = QgsGeometry.createGeometryEngine(geom.constGet())\n geos_geom_engine.prepareGeometry()\n for gid in index.intersects(geom.boundingBox()):\n grid_feat = allfeatures[gid]\n other_geom = grid_feat.geometry()\n other_geom_geos = other_geom.constGet()\n isin = geos_geom_engine.intersects(other_geom_geos)\n if isin is not True or geos_compare(geos_geom_engine, other_geom_geos) is False:\n continue\n values = default_geom(other_geom)\n values += default_value(fid)\n for col in columns:\n try:\n val = feat[col]\n except KeyError:\n val = NULL\n values.append(val)\n values.append(gid)\n values = tuple(values)\n yield values\n\n\ndef poly2poly(base_polygons, polygons, request, area_percent, *columns):\n \"\"\"\n Generator which calculates base polygons intersections with another polygon layer.\n \"\"\"\n allfeatures, index = spatial_index(polygons, request)\n\n base_features = base_polygons.getFeatures() if request is None else base_polygons.getFeatures(request)\n for feat in base_features:\n base_geom = feat.geometry()\n base_area = base_geom.area()\n fids = index.intersects(base_geom.boundingBox())\n if not fids:\n continue\n base_fid = feat.id()\n base_parts = []\n for fid in fids:\n f = allfeatures[fid]\n fgeom = f.geometry()\n inter = fgeom.intersects(base_geom)\n if inter is False:\n continue\n intersection_geom = fgeom.intersection(base_geom)\n subarea = intersection_geom.area() if area_percent is False else intersection_geom.area() / base_area\n values = tuple(f[col] for col in columns) + (subarea,)\n base_parts.append(values)\n yield base_fid, base_parts\n\n\ndef poly2poly_geos(base_polygons, polygons, request = None, *columns):\n \"\"\"\n Generator which calculates base polygons intersections with another polygon layer.\n\n \"\"\"\n\n allfeatures, index = 
intersection_spatial_index(polygons) if request is None else intersection_spatial_index(polygons, request)\n # allfeatures, index = intersection_spatial_index(polygons)\n\n base_features = base_polygons.getFeatures() if request is None else base_polygons.getFeatures(request)\n for feat in base_features:\n base_geom = feat.geometry()\n fids = index.intersects(base_geom.boundingBox())\n if not fids:\n continue\n base_fid = feat.id()\n base_area = base_geom.area()\n base_geom_geos = base_geom.constGet()\n base_geom_engine = QgsGeometry.createGeometryEngine(base_geom_geos)\n base_geom_engine.prepareGeometry()\n base_parts = []\n for fid in fids:\n f, other_geom_engine = allfeatures[fid]\n inter = other_geom_engine.intersects(base_geom_geos)\n if inter is False:\n continue\n if other_geom_engine.contains(base_geom_geos):\n subarea = 1\n elif base_geom_engine.contains(f.geometry().constGet()):\n subarea = other_geom_engine.area() / base_area\n else:\n intersection_geom = other_geom_engine.intersection(base_geom_geos)\n if not intersection_geom:\n continue\n subarea = intersection_geom.area() / base_area\n values = tuple(f[col] for col in columns) + (subarea,)\n base_parts.append(values)\n yield base_fid, base_parts\n\n\n\ndef grid_roughness(grid, gridArea, roughness, col):\n \"\"\"\n Generator which calculates grid polygons intersections with Manning layer.\n \"\"\"\n manningFeatures, index = intersection_spatial_index(roughness)\n gridFeatures = grid.getFeatures()\n\n for gridFeat in gridFeatures:\n gridGeom = gridFeat.geometry()\n fids = index.intersects(gridGeom.boundingBox())\n if not fids:\n continue\n gridFid = gridFeat.id()\n # gridArea = gridGeom.area()\n gridGeomGeos = gridGeom.constGet() # constant abstract geometry primitive (faster than get() method)\n gridGeomEngine = QgsGeometry.createGeometryEngine(gridGeomGeos)\n gridGeomEngine.prepareGeometry() # Prepares the geometry, so that subsequent calls to spatial relation methods are much faster.\n gridParts = []\n for fid in fids:\n f, manningGeomEngine = manningFeatures[fid]\n inter = manningGeomEngine.intersects(gridGeomGeos)\n if inter is False:\n continue\n if manningGeomEngine.contains(gridGeomGeos):\n subarea = 1\n elif gridGeomEngine.contains(f.geometry().constGet()):\n subarea = manningGeomEngine.area() / gridArea\n else:\n intersection_geom = manningGeomEngine.intersection(gridGeomGeos)\n if not intersection_geom:\n continue\n subarea = intersection_geom.area() / gridArea\n values = tuple((f[col], subarea))\n gridParts.append(values)\n yield gridFid, gridParts\n\n\ndef grid_sections(grid, polygons, request, *columns):\n \"\"\"\n Function for finding intersections of polygon layer within grid layer.\n \"\"\"\n try:\n grid_feats = grid.getFeatures()\n first = next(grid_feats)\n grid_area = first.geometry().area()\n except StopIteration:\n return\n\n allfeatures, index = intersection_spatial_index(grid, request)\n polygon_features = polygons.getFeatures() if request is None else polygons.getFeatures(request)\n\n grid_parts = defaultdict(list)\n for feat in polygon_features:\n geom = feat.geometry()\n ids = index.intersects(geom.boundingBox())\n if not ids:\n continue\n geos_geom = geom.constGet()\n geom_engine = QgsGeometry.createGeometryEngine(geos_geom)\n geom_engine.prepareGeometry()\n attributes = tuple(feat[col] for col in columns)\n\n for gid in ids:\n grid_feat, other_geom_engine = allfeatures[gid]\n other_geom = grid_feat.geometry()\n other_geom_geos = other_geom.constGet()\n if geom_engine.contains(other_geom_geos):\n 
subarea = 1\n elif other_geom_engine.contains(geos_geom):\n subarea = other_geom_geos.area() / grid_area\n elif geom_engine.intersects(other_geom_geos):\n subarea = geom_engine.intersection(other_geom_geos).area() / grid_area\n else:\n continue\n values = attributes + (subarea,)\n grid_parts[gid].append(values)\n\n return grid_parts\n\n\ndef cluster_polygons(polygons, *columns):\n \"\"\"\n Functions for clustering polygons by common attributes.\n \"\"\"\n clusters = defaultdict(list)\n for feat in polygons.getFeatures():\n geom_poly = feat.geometry().asPolygon()\n attrs = tuple(feat[col] for col in columns)\n clusters[attrs].append(QgsGeometry.fromPolygonXY(geom_poly))\n return clusters\n\n\ndef clustered_features(polygons, fields, *columns, **columns_map):\n \"\"\"\n Generator which returns features with clustered geometries.\n \"\"\"\n clusters = cluster_polygons(polygons, *columns)\n target_columns = [columns_map[c] if c in columns_map else c for c in columns]\n for attrs, geom_list in list(clusters.items()):\n\n if len(geom_list) > 1:\n geom = QgsGeometry.unaryUnion(geom_list)\n if geom.isMultipart():\n poly_geoms = [QgsGeometry.fromPolygonXY(g) for g in geom.asMultiPolygon()]\n else:\n poly_geoms = [geom]\n else:\n poly_geoms = geom_list\n for new_geom in poly_geoms:\n new_feat = QgsFeature()\n new_feat.setGeometry(new_geom)\n new_feat.setFields(fields)\n for col, val in zip(target_columns, attrs):\n new_feat.setAttribute(col, val)\n yield new_feat\n\n\ndef calculate_spatial_variable_from_polygons(grid, areas, use_centroids=True):\n \"\"\"\n Generator which calculates values based on polygons representing values.\n \"\"\"\n allfeatures, index = spatial_index(areas)\n features = grid.getFeatures()\n\n def get_geom(feature):\n return feature.geometry()\n\n def get_centroid(feature):\n return feature.geometry().centroid()\n\n get_geom_fn = get_centroid if use_centroids is True else get_geom\n for feat in features: # for each grid feature\n geom = get_geom_fn(feat)\n fids = index.intersects(geom.boundingBox())\n for fid in fids:\n f = allfeatures[fid]\n fgeom = f.geometry()\n inter = fgeom.intersects(geom)\n if inter is True:\n yield f.id(), feat.id()\n else:\n pass\n\n\ndef calculate_spatial_variable_from_lines(grid, lines, request=None):\n \"\"\"\n Generator which calculates values based on lines representing values\n yields (grid id, feature id, grid elev).\n \"\"\"\n \n allfeatures, index = spatial_index(lines, request)\n features = grid.getFeatures() if request is None else grid.getFeatures(request)\n for feat in features: # for each grid feature\n geom = feat.geometry() # cell square (a polygon)\n gelev = feat['elevation']\n fids = index.intersects(geom.boundingBox()) # c\n for fid in fids:\n f = allfeatures[fid]\n fgeom = f.geometry()\n inter = fgeom.intersects(geom)\n if inter is True:\n centroid = geom.centroid()\n yield (f.id(), feat.id(), gelev)\n else:\n pass\n \n\ndef calculate_gutter_variable_from_lines(grid, lines):\n \"\"\"\n Generator which calculates values based on lines representing values.\n \"\"\"\n allfeatures, index = spatial_index(lines)\n features = grid.getFeatures()\n for feat in features: # for each grid feature\n geom = feat.geometry() # cell square (a polygon)\n fids = index.intersects(geom.boundingBox()) # c\n for fid in fids:\n f = allfeatures[fid]\n fgeom = f.geometry()\n inter = fgeom.intersects(geom)\n if inter is True:\n centroid = geom.centroid()\n yield (f.id(), feat.id())\n else:\n pass\n\ndef raster2grid(grid, out_raster, request=None):\n \"\"\"\n 
Generator for probing raster data within 'grid' features.\n    \"\"\"\n    probe_raster = QgsRasterLayer(out_raster)\n    if not probe_raster.isValid():\n        return\n\n    features = grid.getFeatures() if request is None else grid.getFeatures(request)\n    for feat in features:\n        center = feat.geometry().centroid().asPoint()\n        ident = probe_raster.dataProvider().identify(center, QgsRaster.IdentifyFormatValue)\n        # ident is the value of the query provided by the identify method of the dataProvider.\n        if ident.isValid():\n            if is_number(ident.results()[1]):\n                val = round(ident.results()[1], 4)\n            else:\n                val = None\n            yield val, feat.id()\n\n\ndef rasters2centroids(vlayer, request, *raster_paths):\n    \"\"\"\n    Generator for probing raster data by centroids.\n\n    Parameters:\n    -----------\n    vlayer: usually the grid layer.\n    request:\n    *raster_paths: list of ASCII files (with path).\n\n    \"\"\"\n    features = vlayer.getFeatures() if request is None else vlayer.getFeatures(request)\n    centroids = []\n    for feat in features:\n        fid = feat.id()\n        center_point = feat.geometry().centroid().asPoint()\n        centroids.append((fid, center_point))\n\n    # 'centroids' has the coordinates (x,y) of the centroids of all features of vlayer (usually the grid layer)\n    for pth in raster_paths:\n        raster_values = []\n        rlayer = QgsRasterLayer(\n            pth\n        )  # rlayer is an instance of the layer constructed from file pth (from list raster_paths).\n        # Loads (or assigns a raster style), populates its bands, calculates its extent,\n        # determines if the layer is gray, paletted, or multiband, and assigns sensible\n        # defaults for the red, green, blue and gray bands.\n        if not rlayer.isValid():\n            continue\n        raster_provider = rlayer.dataProvider()\n        for fid, point in centroids:\n            ident = raster_provider.identify(point, QgsRaster.IdentifyFormatValue)\n            # ident is the value of the query provided by the identify method of the dataProvider.\n            if ident.isValid():\n                if is_number(ident.results()[1]):\n                    val = round(ident.results()[1], 4)\n                else:\n                    val = None\n                raster_values.append((val, fid))\n        yield raster_values\n\n\n# Tools which use GeoPackageUtils instance\ndef square_grid(gutils, boundary, upper_left_coords=None):\n    \"\"\"\n    Function for calculating and writing square grid into 'grid' table.\n    \"\"\"\n    cellsize = float(gutils.get_cont_par(\"CELLSIZE\"))\n    update_cellsize = \"UPDATE user_model_boundary SET cell_size = ?;\"\n    gutils.execute(update_cellsize, (cellsize,))\n    gutils.clear_tables(\"grid\")\n\n    polygons = ((gutils.build_square_from_polygon(poly),) for poly in build_grid(boundary, cellsize, upper_left_coords))\n    sql = [\"\"\"INSERT INTO grid (geom) VALUES\"\"\", 1]\n    for g_tuple in polygons:\n        sql.append(g_tuple)\n    if len(sql) > 2:\n        gutils.batch_execute(sql)\n    else:\n        pass\n\ndef square_grid_with_col_and_row_fields(gutils, boundary, upper_left_coords=None):\n\n    # \"\"\"\n    # Function for calculating and writing square grid into 'grid' table.\n    # \"\"\"\n    #\n    # cellsize = float(gutils.get_cont_par(\"CELLSIZE\"))\n    # update_cellsize = \"UPDATE user_model_boundary SET cell_size = ?;\"\n    # gutils.execute(update_cellsize, (cellsize,))\n    # gutils.clear_tables(\"grid\")\n    #\n    # sql = [\"\"\"INSERT INTO grid (geom, col, row) VALUES\"\"\", 3]\n    # polygonsClRw = ((gutils.build_square_from_polygon2(polyColRow), ) for polyColRow in build_grid_and_tableColRow(boundary, cellsize))\n    # for g_tuple in polygonsClRw:\n    #     sql.append((g_tuple[0][0], g_tuple[0][1], g_tuple[0][2],))\n    # if len(sql) > 2:\n    #     gutils.batch_execute(sql)\n    # else:\n    #     pass\n\n\n\n    \"\"\"\n    Function 
for calculating and writing square grid into 'grid' table.\n \"\"\"\n try:\n cellsize = float(gutils.get_cont_par(\"CELLSIZE\"))\n update_cellsize = \"UPDATE user_model_boundary SET cell_size = ?;\"\n gutils.execute(update_cellsize, (cellsize,))\n gutils.clear_tables(\"grid\")\n \n sql = [\"\"\"INSERT INTO grid (geom, col, row) VALUES\"\"\", 3] \n polygonsClRw = ((gutils.build_square_from_polygon2(polyColRow), ) \n for polyColRow in build_grid_and_tableColRow(boundary, cellsize))\n for g_tuple in polygonsClRw:\n sql.append((g_tuple[0][0], g_tuple[0][1], g_tuple[0][2],))\n if len(sql) > 2:\n gutils.batch_execute(sql)\n else:\n pass\n return True\n except:\n QApplication.restoreOverrideCursor()\n show_error(\n \"ERROR 300521.0526: creating grid with 'col' and 'row' fields failed !\\n\"\n \"_____________________________________________________________________\"\n )\n return False \n\ndef add_col_and_row_fields(grid):\n try:\n caps = grid.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n grid.dataProvider().addAttributes([QgsField('col', QVariant.Int), QgsField('row', QVariant.Int)])\n grid.updateFields()\n return True\n except:\n QApplication.restoreOverrideCursor()\n show_error(\n \"ERROR 300521.1111: creating grid with 'col' and 'row' fields failed !\\n\"\n \"_____________________________________________________________________\"\n )\n return False \ndef evaluate_roughness(gutils, grid, roughness, column_name, method, reset=False):\n \"\"\"\n Updating roughness values inside 'grid' table.\n \"\"\"\n try:\n # start_time = time.time()\n\n if reset is True:\n default = gutils.get_cont_par(\"MANNING\")\n gutils.execute(\"UPDATE grid SET n_value=?;\", (default,))\n else:\n pass\n qry = \"UPDATE grid SET n_value=? WHERE fid=?;\"\n\n if method == \"Areas\":\n # Areas of intersection:\n cellSize = float(gutils.get_cont_par(\"CELLSIZE\"))\n gridArea = cellSize * cellSize\n if update_roughness(gutils, grid, roughness, column_name):\n return True\n # manning_values = grid_roughness(grid, gridArea, roughness,column_name)\n # for gid, values in manning_values:\n # if values:\n # manning = float(sum(ma * subarea for ma, subarea in values))\n # manning = \"{0:.4}\".format(manning)\n # gutils.execute(qry,(manning, gid),)\n else:\n # Centroids\n gutils.con.executemany(qry, poly2grid(grid, roughness, None, True, False, False, 1, column_name))\n gutils.con.commit()\n return True\n\n # end_time = time.time()\n # QApplication.restoreOverrideCursor()\n # debugMsg('\\t{0:.3f} seconds'.format(end_time - start_time))\n\n except:\n QApplication.restoreOverrideCursor()\n show_error(\n \"ERROR 190620.1154: Evaluation of Mannings's n-value failed!\\n\"\n \"_______________________________________________________________________________\"\n )\n return False\n\ndef gridRegionGenerator(gutils, grid, gridSpan = 100, regionPadding = 50, showProgress = True):\n # yields rectangular selection regions in the grid\n # useful for subdividing large geoprocessing tasks over smaller, discrete regions of the grid\n \n #gridCount = grid.featureCount()\n cellsize = float(gutils.get_cont_par(\"CELLSIZE\"))\n \n # process 100x100 cell regions typically\n gridDimPerAnalysisRegion = gridSpan\n #gridsPerAnalysisRegion = gridDimPerAnalysisRegion ** 2\n \n # determine extent of grid\n gridExt = grid.extent()\n ySpan = gridExt.yMaximum() - gridExt.yMinimum()\n xSpan = gridExt.xMaximum() - gridExt.xMinimum()\n \n # determine # of processing rows/columns based upon analysis regions\n colCount = math.ceil(xSpan / 
(gridDimPerAnalysisRegion * cellsize))\n rowCount = math.ceil(ySpan / (gridDimPerAnalysisRegion * cellsize))\n \n # segment the grid ext to create analysis regions\n regionCount = rowCount * colCount\n regionCounter = 0 # exit criteria\n \n #regionPadding = 50 # amount, in ft probably, to pad region extents to prevent boundary effects\n \n if showProgress == True:\n progDialog = QProgressDialog(\"Processing Progress (by area - timing will be uneven)\", \"Cancel\", 0, 100)\n progDialog.setModal(True)\n\n progress = 0.0\n while regionCounter < regionCount:\n for row in range(rowCount):\n yMin = gridExt.yMinimum() + ySpan / rowCount * row - regionPadding / 2.0\n yMax = gridExt.yMinimum() + ySpan / rowCount * (row + 1) + regionPadding / 2.0\n for col in range(colCount):\n xMin = gridExt.xMinimum() + xSpan / colCount * col - regionPadding / 2.0\n xMax = gridExt.xMinimum() + xSpan / colCount * (col + 1) + regionPadding / 2.0\n\n queryRect = QgsRectangle(xMin, yMin, xMax, yMax) # xmin, ymin, xmax, ymax\n\n request = QgsFeatureRequest(queryRect)\n regionCounter += 1 # increment regionCounter up\n if showProgress == True:\n if progDialog.wasCanceled() == True:\n break\n progress = regionCounter/regionCount * 100.0\n progDialog.setValue(progress)\n print (\"Processing region: %s of %s\" % (regionCounter, regionCount))\n yield request\n if showProgress == True:\n if progDialog.wasCanceled() == True:\n break\n if showProgress == True:\n progDialog.close()\n\ndef geos2geosGenerator(gutils, grid, inputFC, *valueColumnNames, extraFC = None):\n # extraFC is a second feature class for the case in which 2 feature classes are to be intersected that are not\n # the grid; land-use intersection with soils, for instance\n \n #gridCount = grid.featureCount()\n cellsize = float(gutils.get_cont_par(\"CELLSIZE\"))\n \n # process 100x100 cell regions\n gridDimPerAnalysisRegion = 100\n #gridsPerAnalysisRegion = gridDimPerAnalysisRegion ** 2\n \n # determine extent of grid\n gridExt = grid.extent()\n ySpan = gridExt.yMaximum() - gridExt.yMinimum()\n xSpan = gridExt.xMaximum() - gridExt.xMinimum()\n \n # determine # of processing rows/columns based upon analysis regions\n colCount = math.ceil(xSpan / (gridDimPerAnalysisRegion * cellsize))\n rowCount = math.ceil(ySpan / (gridDimPerAnalysisRegion * cellsize))\n \n # segment the grid ext to create analysis regions\n regionCount = rowCount * colCount\n regionCounter = 0 # exit criteria\n \n regionPadding = 50 # amount, in ft probably, to pad region extents to prevent boundary effects\n \n while regionCounter < regionCount:\n for row in range(rowCount):\n yMin = gridExt.yMinimum() + ySpan / rowCount * row - regionPadding / 2.0\n yMax = gridExt.yMinimum() + ySpan / rowCount * (row + 1) + regionPadding / 2.0\n for col in range(colCount):\n xMin = gridExt.xMinimum() + xSpan / colCount * col - regionPadding / 2.0\n xMax = gridExt.xMinimum() + xSpan / colCount * (col + 1) + regionPadding / 2.0\n\n queryRect = QgsRectangle(xMin, yMin, xMax, yMax) # xmin, ymin, xmax, ymax\n\n request = QgsFeatureRequest(queryRect)\n yieldVal = None\n if extraFC is None:\n yieldVal = poly2poly_geos(grid, inputFC, request, *valueColumnNames) # this returns 2 values\n else:\n yieldVal = poly2poly_geos(inputFC, extraFC, request, *valueColumnNames) # this returns 2 values\n regionCounter += 1 # increment regionCounter up\n yield (yieldVal[0], yieldVal[1], (regionCount - 1) / regionCount) # yield the intersection list and % complete\n\n\ndef update_roughness(gutils, grid, roughness, column_name, 
reset=False):\n \"\"\"\n Updating roughness values inside 'grid' table.\n \"\"\"\n try:\n # startTime = time.time()\n\n globalnValue = gutils.get_cont_par(\"MANNING\")\n if reset is True:\n gutils.execute(\"UPDATE grid SET n_value=?;\", (globalnValue,))\n else:\n pass\n qry = \"UPDATE grid SET n_value=? WHERE fid=?;\"\n \n gridCount = 0\n for request in gridRegionGenerator(gutils, grid, gridSpan = 100, regionPadding = 50, showProgress = True):\n writeVals = []\n manning_values = poly2poly_geos(grid, roughness, request, column_name) # this returns 2 values\n #if extraFC is None:\n # yieldVal = poly2poly_geos(grid, inputFC, request, *valueColumnNames) # this returns 2 values\n \n for gid, values in manning_values:\n gridCount += 1\n # if gridCount % 1000 == 0:\n # print (\"Processing %s\" % gridCount)\n if values:\n manning = sum(ma * float(subarea) for ma, subarea in values)\n manning = manning + (1.0 - sum(float(subarea) for ma, subarea in values)) * float(globalnValue)\n manning = \"{0:.4}\".format(manning)\n writeVals.append((manning, gid))\n \n if len(writeVals) > 0:\n gutils.con.executemany(qry, writeVals)\n # print (\"committing to db\")\n gutils.con.commit()\n\n return True\n # endTime = time.time()\n # # print (\"total write Time: %s min\" % ((endTime - startTime)/60.0))\n #\n # QApplication.restoreOverrideCursor()\n # debugMsg(\"{0:.3f} seconds sampling Manning's values\".format(endTime - startTime))\n\n except:\n QApplication.restoreOverrideCursor()\n show_error(\n \"ERROR 190620.1158: Evaluation of Mannings's n-value failed!\\n\"\n \"_______________________________________________________________________________\"\n )\n return False\n\n\ndef modify_elevation(gutils, grid, elev):\n \"\"\"\n Modifying elevation values inside 'grid' table.\n \"\"\"\n set_qry = \"UPDATE grid SET elevation = ? WHERE fid = ?;\"\n add_qry = \"UPDATE grid SET elevation = elevation + ? WHERE fid = ?;\"\n set_add_qry = \"UPDATE grid SET elevation = ? + ? 
WHERE fid = ?;\"\n    set_vals = []\n    add_vals = []\n    set_add_vals = []\n    qry_dict = {set_qry: set_vals, add_qry: add_vals, set_add_qry: set_add_vals}\n    for el, cor, fid in poly2grid(grid, elev, None, True, False, False, 1, \"elev\", \"correction\"):\n        if el != NULL and cor == NULL:\n            set_vals.append((el, fid))\n        elif el == NULL and cor != NULL:\n            add_vals.append((cor, fid))\n        elif el != NULL and cor != NULL:\n            set_add_vals.append((el, cor, fid))\n        else:\n            pass\n\n    for qry, vals in qry_dict.items():\n        if vals:\n            cur = gutils.con.cursor()\n            cur.executemany(qry, vals)\n            gutils.con.commit()\n\n\ndef evaluate_arfwrf(gutils, grid, areas):\n    \"\"\"\n    Calculating and inserting ARF and WRF values into 'blocked_cells' table.\n\n    Parameters\n    ----------\n\n    gutils:\n        the GeoPackageUtils class for the database handling:\n        creation of cursor objects, their execution, commits to the tables, etc.\n\n    grid:\n        the grid layer.\n\n    areas:\n        the user blocked areas.\n\n    \"\"\"\n    try:\n        nulls = 0\n        del_cells = \"DELETE FROM blocked_cells;\"\n        qry_cells = [\n            \"\"\"INSERT INTO blocked_cells (geom, grid_fid, area_fid, arf, wrf1, wrf2, wrf3, wrf4, wrf5, wrf6, wrf7, wrf8) VALUES\"\"\",\n            12,\n        ]\n        gutils.execute(del_cells)\n\n        for row, was_null in calculate_arfwrf(grid, areas):\n            # \"row\" is a tuple like (u'Point (368257 1185586)', 1075L, 1L, 0.06, 0.0, 1.0, 0.0, 0.0, 0.14, 0.32, 0.0, 0.0)\n            point_wkt = row[0]  # First element of tuple \"row\" is a POINT (centroid of cell?)\n            point_gpb = gutils.wkt_to_gpb(point_wkt)\n            new_row = (point_gpb,) + row[1:]\n            qry_cells.append(new_row)\n\n            if was_null:\n                nulls += 1\n\n        gutils.batch_execute(qry_cells)\n\n        if nulls > 0:\n            ms_box = QMessageBox(\n                QMessageBox.Warning,\n                \"Warning\",\n                \"Calculation of the area reduction factors encountered NULL values in\\n\"\n                + \"the attributes of the User Blocked Areas layer.\\n\\n\"\n                + str(nulls)\n                + \" intersections with the Grid layer were performed but their\\n\"\n                + \"references to the NULL values may affect its related FLO-2D functionality.\",\n            )\n\n            ms_box.exec_()\n            ms_box.show()\n\n        return True\n\n    except:\n        show_error(\n            \"ERROR 060319.1605: Evaluation of ARFs and WRFs failed! 
Please check your Blocked Areas User Layer.\\n\"\n \"_______________________________________________________________________________\"\n )\n return False\n\n\ndef calculate_arfwrf(grid, areas):\n \"\"\"\n Generator which calculates ARF and WRF values based on polygons representing blocked areas.\n \"\"\"\n try:\n sides = (\n (lambda x, y, square_half, octa_half: (x - octa_half, y + square_half, x + octa_half, y + square_half)),\n (lambda x, y, square_half, octa_half: (x + square_half, y + octa_half, x + square_half, y - octa_half)),\n (lambda x, y, square_half, octa_half: (x + octa_half, y - square_half, x - octa_half, y - square_half)),\n (lambda x, y, square_half, octa_half: (x - square_half, y - octa_half, x - square_half, y + octa_half)),\n (lambda x, y, square_half, octa_half: (x + octa_half, y + square_half, x + square_half, y + octa_half)),\n (lambda x, y, square_half, octa_half: (x + square_half, y - octa_half, x + octa_half, y - square_half)),\n (lambda x, y, square_half, octa_half: (x - octa_half, y - square_half, x - square_half, y - octa_half)),\n (lambda x, y, square_half, octa_half: (x - square_half, y + octa_half, x - octa_half, y + square_half)),\n )\n was_null = False\n allfeatures, index = spatial_index(areas)\n features = grid.getFeatures()\n first = next(features)\n grid_area = first.geometry().area()\n grid_side = math.sqrt(grid_area)\n octagon_side = grid_side / 2.414\n half_square = grid_side * 0.5\n half_octagon = octagon_side * 0.5\n empty_wrf = (0,) * 8\n full_wrf = (1,) * 8\n features.rewind()\n for feat in features:\n geom = feat.geometry()\n fids = index.intersects(geom.boundingBox())\n for fid in fids:\n f = allfeatures[fid]\n fgeom = f.geometry()\n if f[\"calc_arf\"] == NULL or f[\"calc_wrf\"] == NULL:\n was_null = True\n farf = int(1 if f[\"calc_arf\"] == NULL else f[\"calc_arf\"])\n fwrf = int(1 if f[\"calc_wrf\"] == NULL else f[\"calc_wrf\"])\n inter = fgeom.intersects(geom)\n if inter is True:\n areas_intersection = fgeom.intersection(geom)\n arf = round(areas_intersection.area() / grid_area, 2) if farf == 1 else 0\n centroid = geom.centroid()\n centroid_wkt = centroid.asWkt()\n if arf >= 0.9:\n yield (centroid_wkt, feat.id(), f.id(), 1) + (full_wrf if fwrf == 1 else empty_wrf), was_null\n continue\n else:\n pass\n grid_center = centroid.asPoint()\n wrf_s = (f(grid_center.x(), grid_center.y(), half_square, half_octagon) for f in sides)\n wrf_geoms = (\n QgsGeometry.fromPolylineXY([QgsPointXY(x1, y1), QgsPointXY(x2, y2)]) for x1, y1, x2, y2 in wrf_s\n )\n if fwrf == 1:\n wrf = (round(line.intersection(fgeom).length() / octagon_side, 2) for line in wrf_geoms)\n else:\n wrf = empty_wrf\n yield (centroid_wkt, feat.id(), f.id(), arf) + tuple(wrf), was_null\n else:\n pass\n\n except:\n show_error(\n \"ERROR 060319.1606: Evaluation of ARFs and WRFs failed! 
Please check your Blocked Areas User Layer.\\n\"\n \"_______________________________________________________________________________\"\n )\n\n\ndef evaluate_spatial_tolerance(gutils, grid, areas):\n \"\"\"\n Calculating and inserting tolerance values into 'tolspatial_cells' table.\n \"\"\"\n del_cells = \"DELETE FROM tolspatial_cells;\"\n qry_cells = [\"\"\"INSERT INTO tolspatial_cells (area_fid, grid_fid) VALUES\"\"\", 2]\n\n gutils.execute(del_cells)\n for row in calculate_spatial_variable_from_polygons(grid, areas):\n qry_cells.append(row)\n\n gutils.batch_execute(qry_cells)\n\n\ndef evaluate_spatial_buildings_adjustment_factor(gutils, grid, areas):\n gutils.uc.show_warn(\"WARNING 060319.1615: Assignment of building areas to building polygons. Not implemented yet!\")\n\n\ndef evaluate_spatial_froude(gutils, grid, areas):\n \"\"\"\n Calculating and inserting fraude values into 'fpfroude_cells' table.\n \"\"\"\n del_cells = \"DELETE FROM fpfroude_cells;\"\n qry_cells = [\"\"\"INSERT INTO fpfroude_cells (area_fid, grid_fid) VALUES\"\"\", 2]\n\n gutils.execute(del_cells)\n for row in calculate_spatial_variable_from_polygons(grid, areas):\n qry_cells.append(row)\n\n gutils.batch_execute(qry_cells)\n\n\ndef evaluate_spatial_shallow(gutils, grid, areas):\n \"\"\"\n Calculating and inserting shallow-n values into 'spatialshallow_cells' table.\n \"\"\"\n del_cells = \"DELETE FROM spatialshallow_cells;\"\n qry_cells = [\"\"\"INSERT INTO spatialshallow_cells (area_fid, grid_fid) VALUES\"\"\", 2]\n\n gutils.execute(del_cells)\n for row in calculate_spatial_variable_from_polygons(grid, areas):\n qry_cells.append(row)\n\n gutils.batch_execute(qry_cells)\n\n\ndef evaluate_spatial_gutter(gutils, grid, areas, lines):\n \"\"\"\n Calculating and inserting gutter values into 'gutter_cells' table.\n \"\"\"\n del_cells = \"DELETE FROM gutter_cells;\"\n insert_cells_from_polygons = [\"\"\"INSERT INTO gutter_cells (area_fid, grid_fid) VALUES\"\"\", 2]\n insert_cells_from_lines = [\"\"\"INSERT INTO gutter_cells (line_fid, grid_fid) VALUES\"\"\", 2]\n\n gutils.execute(del_cells)\n if areas:\n for row in calculate_spatial_variable_from_polygons(grid, areas):\n insert_cells_from_polygons.append(row)\n gutils.batch_execute(insert_cells_from_polygons)\n\n if lines:\n for row in calculate_gutter_variable_from_lines(grid, lines):\n insert_cells_from_lines.append(row)\n gutils.batch_execute(insert_cells_from_lines)\n\n\ndef evaluate_spatial_noexchange(gutils, grid, areas):\n \"\"\"\n Calculating and inserting noexchange values into 'noexchange_chan_cells' table.\n \"\"\"\n del_cells = \"DELETE FROM noexchange_chan_cells;\"\n qry_cells = [\"\"\"INSERT INTO noexchange_chan_cells (area_fid, grid_fid) VALUES\"\"\", 2]\n\n gutils.execute(del_cells)\n for row in calculate_spatial_variable_from_polygons(grid, areas):\n qry_cells.append(row)\n\n gutils.batch_execute(qry_cells)\n\n\ndef grid_has_empty_elev(gutils):\n \"\"\"\n Return number of grid elements that have no elevation defined.\n \"\"\"\n qry = \"\"\"SELECT count(*) FROM grid WHERE elevation IS NULL;\"\"\"\n res = gutils.execute(qry)\n try:\n n = next(res)\n return n[0]\n except StopIteration:\n return None\n\ndef grid_has_empty_n_value(gutils):\n \"\"\"\n Return number of grid elements that have no n_value defined.\n \"\"\"\n qry = \"\"\"SELECT count(*) FROM grid WHERE n_value IS NULL;\"\"\"\n res = gutils.execute(qry)\n try:\n n = next(res)\n return n[0]\n except StopIteration:\n return None\n\ndef fid_from_grid_np(gutils, table_name, table_fids=None, grid_center = 
False, switch=False, *extra_fields):\n \"\"\"\n Get a list of grid elements fids that intersect the given tables features.\n Optionally, users can specify a list of table_fids to be checked.\n \"\"\"\n grid_elems = []\n if cellIDNumpyArray is None:\n cellIDNumpyArray, xvalsNumpyArray, yvalsNumpyArray = buildCellIDNPArray(gutils)\n if cellElevNumpyArray is None:\n cellElevNumpyArray = buildCellElevNPArray(gutils, cellIDNumpyArray)\n \n # iterate over features\n \n\n return grid_elems\n\ndef divide_line_grid_np(gutils, line):\n # return the cell ids and segment coordinates for each segment \n # [\n # [15, [[15.25, 14.25], [18.25, 10.2]]],\n # [17, [[18.25, 12.25], [25.25, 13.2]]],\n # ]\n lineSegments = [] \n if cellIDNumpyArray is None:\n cellIDNumpyArray, xvalsNumpyArray, yvalsNumpyArray = buildCellIDNPArray(gutils)\n \n return lineSegments\n\ndef fid_from_grid_features(gutils, grid, linefeatures) :\n \"\"\"\n Get a list of grid elements fids that intersect the grid features.\n Used to calculate levee-line intersections from grid\n \n gridRegionGenerator implemented to increase processing speed for\n large datasets\n \"\"\"\n retVals = []\n \n for region in gridRegionGenerator(gutils, grid, showProgress = True):\n # process each sub-area of the grid\n retVals = []\n for result in calculate_spatial_variable_from_lines(grid, linefeatures, region): # returns grid id, line id, grid elev\n # currently, this goes one line at a time\n retVals.append(result)\n yield (retVals, region)\n # return cell ids and elevations\n #return retVals\n\ndef fid_from_grid(gutils, table_name, table_fids=None, grid_center=False, switch=False, *extra_fields):\n \"\"\"\n Get a list of grid elements fids that intersect the given tables features.\n Optionally, users can specify a list of table_fids to be checked.\n \"\"\"\n grid_geom = \"ST_Centroid(GeomFromGPB(g1.geom))\" if grid_center is True else \"GeomFromGPB(g1.geom)\"\n grid_data = \"g1.fid, \" + \", \".join((\"g1.{}\".format(fld) for fld in extra_fields)) if extra_fields else \"g1.fid\"\n qry = \"\"\"\n SELECT\n g2.fid, {0}\n FROM\n grid AS g1, {1} AS g2\n WHERE g1.ROWID IN (\n SELECT id FROM rtree_grid_geom\n WHERE\n ST_MinX(GeomFromGPB(g2.geom)) <= maxx AND\n ST_MaxX(GeomFromGPB(g2.geom)) >= minx AND\n ST_MinY(GeomFromGPB(g2.geom)) <= maxy AND\n ST_MaxY(GeomFromGPB(g2.geom)) >= miny)\n AND\n ST_Intersects({2}, GeomFromGPB(g2.geom))\n \"\"\"\n qry = qry.format(grid_data, table_name, grid_geom)\n if table_fids:\n qry += \"AND g2.fid IN ({}) \".format(\", \".join(f for f in table_fids))\n else:\n pass\n first, second = (1, 0) if switch is True else (0, 1)\n qry += \"\"\"ORDER BY g2.fid, g1.fid;\"\"\"\n grid_elems = ((row[first], row[second]) + tuple(row[2:]) for row in gutils.execute(qry))\n return grid_elems\n\ndef highlight_selected_segment(layer, id):\n feat_selection = []\n for feature in layer.getFeatures():\n if feature.id() == id:\n feat_selection.append(feature.id())\n break\n layer.selectByIds(feat_selection)\n\n\ndef highlight_selected_xsection_a(gutils, layer, xs_id):\n qry = \"\"\"SELECT id FROM chan_elems WHERE fid = ?;\"\"\"\n xs = gutils.execute(qry, (xs_id,)).fetchone()\n feat_selection = []\n for feature in layer.getFeatures():\n if feature.id() == xs[0]:\n feat_selection.append(feature.id())\n break\n layer.selectByIds(feat_selection)\n\n\ndef highlight_selected_xsection_b(layer, xs_id):\n feat_selection = []\n for feature in layer.getFeatures():\n if feature.id() == xs_id:\n feat_selection.append(feature.id())\n break\n 
layer.selectByIds(feat_selection)\n\ndef buildCellIDNPArray(gutils):\n # construct numpy arrays of key grid parameters such as cellid and elevation\n starttime = datetime.datetime.now()\n incTime = datetime.datetime.now()\n \n centroids = gutils.grid_centroids_all()\n print (\"Centroids pull time: %s sec\" % (datetime.datetime.now() - incTime).total_seconds())\n incTime = datetime.datetime.now()\n # list in format [gid, [x, y]]\n xVals = sorted(list(set([item[1][0] for item in centroids])))\n yVals = sorted(list(set([item[1][1] for item in centroids])))\n \n centroids = sorted(centroids, key=lambda student: student[0])\n \n centroids = [(item[1][0], item[1][1], item[0]) for item in centroids] # flatten list\n \n centroids = np.array(centroids, dtype=float)\n \n #yVals = yVals.sort(reverse=True) # place in reverse order per raster orientation\n xVals = np.array(xVals, dtype=float)\n yVals = np.array(yVals, dtype=float)\n \n centroidsXInd = np.searchsorted(xVals, centroids[:,0], side='right')-1\n centroidsYInd = np.searchsorted(yVals, centroids[:,1], side='right')\n\n centroidsYInd = yVals.shape[0] - centroidsYInd # for reverse ordering\n yVals = np.flip(yVals) # reverse order\n \n cellIDs = np.zeros((yVals.shape[0], xVals.shape[0]), dtype=int)\n # populate cellIDs array\n cellIDs[centroidsYInd, centroidsXInd] = centroids[:,2]\n #for n in range(centroids.shape[0]):\n # cellIDs[centroidsYInd[n], centroidsXInd[n]] = n + 1\n \n del centroidsXInd, centroidsYInd, centroids\n print (\"Array creation time: %s sec\" % (datetime.datetime.now() - incTime).total_seconds())\n print (\"Total CellID time: %s sec\" % (datetime.datetime.now() - starttime).total_seconds())\n return cellIDs, xVals, yVals\n\ndef buildCellElevNPArray(gutils, cellIDArray):\n starttime = datetime.datetime.now()\n qry_elevs = (\n \"\"\"SELECT elevation FROM grid ORDER BY fid\"\"\"\n )\n elevs = gutils.execute(qry_elevs).fetchall()\n print (\"Elevs pull time: %s sec\" % (datetime.datetime.now() - starttime).total_seconds())\n incTime = datetime.datetime.now()\n \n elevs = np.array(elevs, dtype=float)\n elevs = elevs[:,0]\n elevArray = np.zeros(cellIDArray.shape, dtype=float)\n\n elevArray[cellIDArray != 0] = elevs[cellIDArray[cellIDArray != 0]-1]\n print (\"Elevs Array assignment time: %s sec\" % (datetime.datetime.now() - incTime).total_seconds())\n incTime = datetime.datetime.now()\n print (\"Total Elev Array Gen time: %s sec\" % (datetime.datetime.now() - starttime).total_seconds())\n return elevArray\n\ndef adjacent_grid_elevations_np(cell, cellNPArray, elevNPArray):\n # order is N, NE, E, SE, S, SW, W, NW\n row, col = np.nonzero(cellNPArray == cell)\n row = row[0]\n col = col[0]\n \n dirMatrix = np.array([\n [-1, 0], # N\n [-1, 1], # NE\n [0, 1], # E\n [1, 1], # SE\n [1, 0], # S\n [1, -1], # SW\n [0, -1], # W\n [-1, -1] # NW\n ], dtype=int)\n \n rows = row + 1 * dirMatrix[:,0]\n cols = col + 1 * dirMatrix[:,1]\n \n # filter out entries that are beyond the extents\n mask = rows < elevNPArray.shape[0]\n mask &= rows >= 0\n mask &= cols < elevNPArray.shape[1]\n mask &= cols >= 0\n \n elevs = np.zeros(rows.shape, dtype=float)\n elevs[~mask] = -999\n elevs[mask] = elevNPArray[rows[mask], cols[mask]]\n \n elevs = list(elevs)\n \n return elevs\n \ndef adjacent_grid_elevations(gutils, grid_lyr, cell, cell_size):\n sel_elev_qry = \"\"\"SELECT elevation FROM grid WHERE fid = ?;\"\"\"\n if grid_lyr is not None:\n if cell != \"\":\n cell = int(cell)\n grid_count = gutils.count(\"grid\", field = \"fid\") \n #grid_count = 
len(list(grid_lyr.getFeatures()))\n if grid_count >= cell and cell > 0:\n currentCell = next(grid_lyr.getFeatures(QgsFeatureRequest(cell)))\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n elevs = []\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n N_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n N_elev = -999\n elevs.append(N_elev)\n\n # NorthEast cell\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n NE_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n NE_elev = -999\n elevs.append(NE_elev)\n\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n E_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n E_elev = -999\n elevs.append(E_elev)\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n SE_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n SE_elev = -999\n elevs.append(SE_elev)\n\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n S_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n S_elev = -999\n elevs.append(S_elev)\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n SW_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n SW_elev = -999\n elevs.append(SW_elev)\n\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n W_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n W_elev = -999\n elevs.append(W_elev)\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n NW_elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n else:\n NW_elev = -999\n elevs.append(NW_elev)\n\n return elevs\n\n\ndef adjacent_average_elevation(gutils, grid_lyr, xx, yy, cell_size):\n # sel_elev_qry = \"SELECT elevation FROM grid WHERE fid = ?;\"\n if grid_lyr is not None:\n elevs = []\n \n # North cell:\n y = yy + cell_size\n x = xx\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n\n # NorthEast cell\n y = yy + cell_size\n x = xx + cell_size\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n \n # East cell:\n x = xx + cell_size\n y = yy\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n \n # South cell:\n y = yy - cell_size\n x = xx\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n\n # West cell:\n y = yy\n x = xx - cell_size\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n e = gutils.grid_elevation_on_point(x, y)\n # if e is not None and e != -9999:\n elevs.append(e)\n \n # Return average elevation of adjacent cells: \n n= 0\n avrg = 0\n for elev in elevs:\n if elev is not 
None and elev != -9999:\n avrg += elev\n n += 1\n if n > 0:\n avrg = avrg / n \n else:\n avrg = -9999\n \n return avrg\n\n\ndef three_adjacent_grid_elevations(gutils, grid_lyr, cell, direction, cell_size):\n\n # if grid_lyr is not None:\n # if cell != '':\n # cell = int(cell)\n # grid_count = len(list(grid_lyr.getFeatures()))\n # if grid_count >= cell and cell > 0:\n\n try:\n # Expects a cell number inside the computational domain.\n sel_elev_qry = \"\"\"SELECT elevation FROM grid WHERE fid = ?;\"\"\"\n currentCell = next(grid_lyr.getFeatures(QgsFeatureRequest(cell)))\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n elevs = []\n\n if direction == 1: # North => NW, N, NE\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # NorthEast cell:\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 2: # East => NE, E, SE\n # NorthEast cell:\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 3: # South => SE, S, SW\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 4: # West => SW, W, NW\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 5: # NorthEast => N, NE, E\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if 
grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # NorthEast cell:\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 6: # SouthEast => E, SE, S\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 7: # SouthWest => S, SW, W\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n elif direction == 8: # NorthWest => W, NW, N\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elevs.append(gutils.execute(sel_elev_qry, (grid,)).fetchone()[0])\n else:\n elevs.append(-99999)\n\n return elevs\n except:\n show_error(\"ERROR 040420.1715: could not evaluate adjacent cell elevation!\")\n\ndef get_adjacent_cell_elevation(gutils, grid_lyr, cell, dir, cell_size):\n try:\n sel_elev_qry = \"\"\"SELECT elevation FROM grid WHERE fid = ?;\"\"\"\n currentCell = next(grid_lyr.getFeatures(QgsFeatureRequest(cell)))\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n elev = -999\n if dir == 1: # \"N\"\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 5: # \"NE\"\n # NorthEast cell:\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 2: # \"E\"\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, 
(grid,)).fetchone()[0]\n\n elif dir == 6: # \"SE\"\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 3: # \"S\"\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 7: # \"SW\"\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 4: # \"W\"\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n elif dir == 8: # \"NW\"\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is not None:\n elev = gutils.execute(sel_elev_qry, (grid,)).fetchone()[0]\n\n else:\n show_error(\"ERROR 160520.1650: Invalid direction!\")\n\n return grid, elev\n except:\n show_error(\"ERROR 160520.1644: could not evaluate adjacent cell elevation!\")\n\ndef get_adjacent_cell(gutils, grid_lyr, cell, dir, cell_size):\n try:\n currentCell = next(grid_lyr.getFeatures(QgsFeatureRequest(cell)))\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n elev = -999\n if dir == \"N\":\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n\n\n elif dir == \"NE\":\n # NorthEast cell:\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"E\":\n # East cell:\n x = xx + cell_size\n y = yy\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"SE\":\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"S\":\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"SW\":\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"W\":\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n\n elif dir == \"NW\":\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n\n else:\n show_error(\"ERROR 090321.1623: Invalid direction!\")\n\n return grid\n except:\n show_error(\"ERROR 090321.1624: could not evaluate adjacent cell!\")\n\ndef adjacent_grids(gutils, currentCell, cell_size):\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n # North cell:\n y = yy + cell_size\n x = xx\n n_grid = gutils.grid_on_point(x, y)\n\n # NorthEast cell\n y = yy + cell_size\n x = xx + cell_size\n ne_grid = gutils.grid_on_point(x, y)\n\n # East cell:\n x = xx + cell_size\n y = yy\n e_grid = gutils.grid_on_point(x, y)\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n se_grid = gutils.grid_on_point(x, y)\n\n # South cell:\n y = yy - cell_size\n x = xx\n s_grid = gutils.grid_on_point(x, y)\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n sw_grid = gutils.grid_on_point(x, y)\n\n # West cell:\n y = yy\n x = xx - cell_size\n w_grid = gutils.grid_on_point(x, y)\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n nw_grid = gutils.grid_on_point(x, y) \n \n return n_grid, ne_grid, e_grid, se_grid, s_grid, sw_grid, w_grid, nw_grid\ndef dirID(dir):\n if dir == 1: # \"N\"\n # North cell:\n ID = \"N\"\n\n elif dir == 5: # \"NE\"\n # NorthEast 
cell:\n ID = \"NE\"\n\n elif dir == 2: # \"E\"\n # East cell:\n ID = \"E\"\n\n elif dir == 6: # \"SE\"\n # SouthEast cell:\n ID = \"SE\"\n\n elif dir == 3: # \"S\"\n # South cell:\n ID = \"S\"\n\n elif dir == 7: # \"SW\"\n # SouthWest cell:\n ID = \"SW\"\n\n elif dir == 4: # \"W\"\n # West cell:\n ID = \"W\"\n\n elif dir == 8: # \"NW\"\n # NorthWest cell:\n ID = \"NW\"\n\n else:\n ID = \"?\"\n\n return ID\n\n\ndef is_boundary_cell(gutils, grid_lyr, cell, cell_size):\n if grid_lyr is not None:\n if cell:\n n_cells = number_of_elements(gutils, grid_lyr)\n if n_cells >= cell and cell > 0:\n\n currentCell = next(grid_lyr.getFeatures(QgsFeatureRequest(cell)))\n xx, yy = currentCell.geometry().centroid().asPoint()\n\n # North cell:\n y = yy + cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # NorthEast cell\n y = yy + cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # East cell:\n y = yy\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # SouthEast cell:\n y = yy - cell_size\n x = xx + cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # South cell:\n y = yy - cell_size\n x = xx\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # SouthWest cell:\n y = yy - cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # West cell:\n y = yy\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n # NorthWest cell:\n y = yy + cell_size\n x = xx - cell_size\n grid = gutils.grid_on_point(x, y)\n if grid is None:\n return True\n\n return False\n\n\ndef layer_geometry_is_valid(vlayer):\n \"\"\"Checking if all features geometries are GEOS valid.\"\"\"\n for feat in vlayer.getFeatures():\n geom = feat.geometry()\n if not geom.isGeosValid():\n return False\n return True\n\n\ndef number_of_elements(gutils, layer):\n # if len(layer) > 0:\n # return len(layer)\n # if layer.featureCount() > 0:\n # return layer.featureCount()\n # else:\n count_sql = \"\"\"SELECT COUNT(fid) FROM grid;\"\"\"\n a = gutils.execute(count_sql).fetchone()[0]\n if a:\n return a\n else:\n return len(list(layer.getFeatures())) \n\ndef cell_centroid(self, cell): \n col, row = self.gutils.execute(\"SELECT col, row FROM grid WHERE fid = ?;\",(cell,)).fetchone()\n x = self.xMinimum + (col-2)*self.cell_size + self.cell_size/2\n y = self.yMinimum + (row-2)*self.cell_size + self.cell_size/2\n return x, y\n\ndef cell_elevation(self, x, y):\n col = int((float(x) - self.xMinimum)/self.cell_size) + 2\n row = int((float(y) - self.yMinimum)/self.cell_size) + 2 \n elev = self.gutils.execute(\"SELECT elevation FROM grid WHERE col = ? 
AND row = ?;\", (col, row,)).fetchone()\n return elev \n \ndef render_grid_elevations2(elevs_lyr, show_nodata, mini, mini2, maxi):\n if show_nodata:\n colors = ['#0011FF', '#0061FF', '#00D4FF', '#00FF66', '#00FF00', '#E5FF32', '#FCFC0C', '#FF9F00', '#FF3F00', '#FF0000']\n myRangeList = []\n if mini == -9999: \n symbol = QgsSymbol.defaultSymbol(elevs_lyr.geometryType()) \n symbol.symbolLayer(0).setStrokeStyle(Qt.PenStyle(Qt.NoPen)) \n symbol.setColor(QColor(Qt.lightGray)) \n try:\n symbol.setSize(1) \n except:\n pass \n myRange = QgsRendererRange(-9999, -9999, symbol, '-9999') \n myRangeList.append(myRange) \n step = (maxi - mini2) / (len(colors)-1)\n low = mini2\n high = mini2 + step \n else:\n step = (maxi - mini) / (len(colors)-1) \n low = mini\n high = mini + step\n \n for i in range (0,len(colors)-2): \n symbol = QgsSymbol.defaultSymbol(elevs_lyr.geometryType()) \n symbol.symbolLayer(0).setStrokeStyle(Qt.PenStyle(Qt.NoPen)) \n symbol.setColor(QColor(colors[i]))\n try:\n symbol.setSize(1) \n except:\n pass \n myRange = QgsRendererRange(low,high, symbol, '{0:.2f}'.format(low) + ' - ' + '{0:.2f}'.format(high)) \n myRangeList.append(myRange)\n low = high\n high = high + step\n \n symbol = QgsSymbol.defaultSymbol(elevs_lyr.geometryType()) \n symbol.symbolLayer(0).setStrokeStyle(Qt.PenStyle(Qt.NoPen)) \n symbol.setColor(QColor(colors[len(colors)-1]))\n try:\n symbol.setSize(1) \n except:\n pass \n \n myRange = QgsRendererRange(low,maxi, symbol, '{0:.2f}'.format(low) + ' - ' + '{0:.2f}'.format(maxi)) \n myRangeList.append(myRange)\n\n myRenderer = QgsGraduatedSymbolRenderer(\"elevation\", myRangeList) \n myRenderer.setMode(QgsGraduatedSymbolRenderer.Custom) \n \n elevs_lyr.setRenderer(myRenderer) \n elevs_lyr.triggerRepaint()\n\n else:\n style_path2 = get_file_path(\"styles\", \"grid.qml\")\n if os.path.isfile(style_path2):\n err_msg, res = elevs_lyr.loadNamedStyle(style_path2)\n if not res:\n QApplication.restoreOverrideCursor()\n msg = \"Unable to load style {}.\\n{}\".format(style_path2, err_msg)\n raise Flo2dError(msg)\n else:\n QApplication.restoreOverrideCursor()\n raise Flo2dError(\"Unable to load style {}\".format(style_path2))\n prj = QgsProject.instance()\n prj.layerTreeRoot().findLayer(elevs_lyr.id()).setItemVisibilityCheckedParentRecursive(True) \n \ndef find_this_cell(iface, lyrs, uc, gutils, cell, color = Qt.yellow, zoom_in = False, clear_previous = True):\n try:\n QApplication.setOverrideCursor(Qt.WaitCursor)\n grid = lyrs.data[\"grid\"][\"qlyr\"]\n if grid is not None:\n if grid:\n ext = iface.mapCanvas().extent()\n if cell != \"\":\n cell = int(cell)\n if len(grid) >= cell and cell > 0:\n lyrs.show_feat_rubber(grid.id(), cell, QColor(color), clear_previous)\n currentCell = next(grid.getFeatures(QgsFeatureRequest(cell)))\n x, y = currentCell.geometry().centroid().asPoint()\n if (\n x < ext.xMinimum()\n or x > ext.xMaximum()\n or y < ext.yMinimum()\n or y > ext.yMaximum()\n ):\n center_canvas(iface, x, y)\n ext = iface.mapCanvas().extent()\n else:\n if zoom_in: \n center_canvas(iface, x, y)\n cell_size = float(gutils.get_cont_par(\"CELLSIZE\"))\n zoom_show_n_cells(iface, cell_size, 30)\n ext = iface.mapCanvas().extent()\n else:\n if cell != -999:\n uc.bar_warn(\"Cell \" + str(cell) + \" not found.\", 2)\n lyrs.clear_rubber()\n else:\n lyrs.clear_rubber()\n else:\n if cell.strip() != \"-999\" and cell.strip() != \"\":\n uc.bar_warn(\"Cell \" + str(cell) + \" not found.\", 2)\n lyrs.clear_rubber()\n else:\n lyrs.clear_rubber()\n except ValueError:\n uc.bar_warn(\"Cell \" + 
str(cell) + \" is not valid.\")\n lyrs.clear_rubber()\n pass\n finally:\n QApplication.restoreOverrideCursor()\n \n", "sub_path": "flo2d/flo2d_tools/grid_tools.py", "file_name": "grid_tools.py", "file_ext": "py", "file_size_in_byte": 93270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "qgis.analysis.QgsInterpolator.LayerData", "line_number": 65, "usage_type": "call"}, {"api_name": "qgis.analysis.QgsInterpolator", "line_number": 65, "usage_type": "name"}, {"api_name": "qgis.analysis.QgsTinInterpolator", "line_number": 70, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeedback", "line_number": 73, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 86, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 124, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 125, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 135, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 135, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 175, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 175, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 175, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 175, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 188, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 188, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 188, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 188, "usage_type": "name"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 193, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 217, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path", "line_number": 249, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 259, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 260, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 270, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 270, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 310, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 310, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 310, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 310, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 323, 
"usage_type": "call"}, {"api_name": "os.devnull", "line_number": 323, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 323, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 323, "usage_type": "name"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 328, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 348, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 354, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 358, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.Critical", "line_number": 359, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 359, "usage_type": "name"}, {"api_name": "qgis.analysis.QgsZonalStatistics", "line_number": 380, "usage_type": "call"}, {"api_name": "qgis.core.QgsSpatialIndex", "line_number": 391, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeature", "line_number": 393, "usage_type": "call"}, {"api_name": "qgis.core.QgsSpatialIndex", "line_number": 404, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeature", "line_number": 406, "usage_type": "call"}, {"api_name": "qgis.core.QgsSpatialIndex", "line_number": 418, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.ValidatorGeos", "line_number": 427, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 427, "usage_type": "name"}, {"api_name": "errors.GeometryValidityErrors", "line_number": 429, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 433, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 433, "usage_type": "name"}, {"api_name": "qgis.core.QgsFeature", "line_number": 435, "usage_type": "call"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 466, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 467, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 467, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 468, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 470, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 470, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 471, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 473, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 473, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 474, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 476, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 476, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 477, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 486, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 486, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 517, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 518, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 521, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 521, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.fromPointXY", "line_number": 526, "usage_type": "call"}, {"api_name": 
"qgis.core.QgsGeometry", "line_number": 526, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 526, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 563, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 564, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 567, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 567, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.fromPointXY", "line_number": 572, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 572, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 572, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 662, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 662, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 677, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 730, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 730, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 768, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 768, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 804, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.createGeometryEngine", "line_number": 811, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 811, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 837, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 841, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 841, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.unaryUnion", "line_number": 854, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 854, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.fromPolygonXY", "line_number": 856, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 856, "usage_type": "name"}, {"api_name": "qgis.core.QgsFeature", "line_number": 862, "usage_type": "call"}, {"api_name": "qgis.core.QgsRasterLayer", "line_number": 943, "usage_type": "call"}, {"api_name": "qgis.core.QgsRaster.IdentifyFormatValue", "line_number": 950, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsRaster", "line_number": 950, "usage_type": "name"}, {"api_name": "utils.is_number", "line_number": 953, "usage_type": "call"}, {"api_name": "qgis.core.QgsRasterLayer", "line_number": 981, "usage_type": "call"}, {"api_name": "qgis.core.QgsRaster.IdentifyFormatValue", "line_number": 991, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsRaster", "line_number": 991, "usage_type": "name"}, {"api_name": "utils.is_number", "line_number": 994, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 1063, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 1063, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 1078, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 1078, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 1121, "usage_type": "call"}, {"api_name": 
"qgis.PyQt.QtWidgets.QApplication", "line_number": 1121, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 1145, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 1146, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QProgressDialog", "line_number": 1155, "usage_type": "call"}, {"api_name": "qgis.core.QgsRectangle", "line_number": 1167, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 1169, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 1201, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 1202, "usage_type": "call"}, {"api_name": "qgis.core.QgsRectangle", "line_number": 1218, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 1220, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 1274, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 1274, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 1294, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 1296, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 1298, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 1350, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox.Warning", "line_number": 1351, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QMessageBox", "line_number": 1351, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 1393, "usage_type": "call"}, {"api_name": "qgis.core.NULL", "line_number": 1406, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 1408, "usage_type": "name"}, {"api_name": "qgis.core.NULL", "line_number": 1409, "usage_type": "name"}, {"api_name": "qgis.core.QgsGeometry.fromPolylineXY", "line_number": 1424, "usage_type": "call"}, {"api_name": "qgis.core.QgsGeometry", "line_number": 1424, "usage_type": "name"}, {"api_name": "qgis.core.QgsPointXY", "line_number": 1424, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1655, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1655, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1656, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1656, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1659, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1659, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1660, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1660, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1669, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1672, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1673, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 1675, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 1676, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 1679, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1681, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1688, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1688, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1689, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1689, 
"usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1693, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1693, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1698, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1698, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1699, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1699, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1701, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1703, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1706, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1706, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1707, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1707, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 1708, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1708, "usage_type": "attribute"}, {"api_name": "numpy.nonzero", "line_number": 1713, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1717, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1737, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 1753, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 1927, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 2163, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 2240, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 2389, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol.defaultSymbol", "line_number": 2490, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol", "line_number": 2490, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.PenStyle", "line_number": 2491, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2491, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.NoPen", "line_number": 2491, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 2492, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt.lightGray", "line_number": 2492, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2492, "usage_type": "name"}, {"api_name": "qgis.core.QgsRendererRange", "line_number": 2497, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol.defaultSymbol", "line_number": 2508, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol", "line_number": 2508, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.PenStyle", "line_number": 2509, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2509, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.NoPen", "line_number": 2509, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 2510, "usage_type": "call"}, {"api_name": "qgis.core.QgsRendererRange", "line_number": 2515, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol.defaultSymbol", "line_number": 2520, "usage_type": "call"}, {"api_name": "qgis.core.QgsSymbol", "line_number": 2520, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.PenStyle", "line_number": 2521, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2521, "usage_type": "name"}, 
{"api_name": "qgis.PyQt.QtCore.Qt.NoPen", "line_number": 2521, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 2522, "usage_type": "call"}, {"api_name": "qgis.core.QgsRendererRange", "line_number": 2528, "usage_type": "call"}, {"api_name": "qgis.core.QgsGraduatedSymbolRenderer", "line_number": 2531, "usage_type": "call"}, {"api_name": "qgis.core.QgsGraduatedSymbolRenderer.Custom", "line_number": 2532, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsGraduatedSymbolRenderer", "line_number": 2532, "usage_type": "name"}, {"api_name": "utils.get_file_path", "line_number": 2538, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 2539, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2539, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 2542, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 2542, "usage_type": "name"}, {"api_name": "errors.Flo2dError", "line_number": 2544, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 2546, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 2546, "usage_type": "name"}, {"api_name": "errors.Flo2dError", "line_number": 2547, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 2548, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 2548, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.yellow", "line_number": 2551, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2551, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.setOverrideCursor", "line_number": 2553, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 2553, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.Qt.WaitCursor", "line_number": 2553, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtCore.Qt", "line_number": 2553, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtGui.QColor", "line_number": 2561, "usage_type": "call"}, {"api_name": "qgis.core.QgsFeatureRequest", "line_number": 2562, "usage_type": "call"}, {"api_name": "gui.ui_utils.center_canvas", "line_number": 2570, "usage_type": "call"}, {"api_name": "gui.ui_utils.center_canvas", "line_number": 2574, "usage_type": "call"}, {"api_name": "gui.ui_utils.zoom_show_n_cells", "line_number": 2576, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 2595, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QApplication", "line_number": 2595, "usage_type": "name"}]} +{"seq_id": "61901091", "text": "import requests\nfrom bs4 import BeautifulSoup\nurl = 'http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnRltmMesureDnsty?serviceKey=QaGapZXPV5DTM72fy6lrf3hJnrJxhila1UVkPlUCo0N0g0F0RZ9WEngT8RkNjNo4IF%2BikV%2BthQLze39nK4IQjA%3D%3D&numOfRows=10&pageSize=10&pageNo=3&startPage=3&sidoName=%EC%84%9C%EC%9A%B8&ver=1.6'\n\nrequest = requests.get(url).text\n# print(type(request)) #= \n# print(request)\n\nsoup = BeautifulSoup(request,'xml')\n# print(type(soup)) #= \n# print(soup)\n\ngangnam = soup('item')[7]\nlocation = gangnam.stationName.text\ntime = gangnam.dataTime.text\ndust = int(gangnam.pm10Value.text)\n\n# dust 변수에 들어 있는 내용을 출력해보세요.\nprint('{0} 기준 {1}의 미세먼지 농도는 {2}입니다.'.format(time,location,dust)) # 기존방식\nprint(f\"{time} 기준 {location}의 미세먼지 농도는 
{dust}입니다.\") # 새로운 fString 방식\n\n# dust 변수에 들어 있는 값을 기준으로 상태 정보를 출력해부세요.\nif 150 < dust:\n print(\"매우 나쁨\")\nelif 80 < dust <= 150:\n print(\"나쁨\")\nelif 30 < dust <= 80:\n print(\"보통\")\nelse:\n print(\"좋음\")", "sub_path": "Practice/dust_app.py", "file_name": "dust_app.py", "file_ext": "py", "file_size_in_byte": 1172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "8554443", "text": "'''\n\t@ Travis Drake (EklipZ) eklipz.io - tdrake0x45 at gmail)\n\tApril 2017\n\tGenerals.io Automated Client - https://github.com/harrischristiansen/generals-bot\n\tEklipZ bot - Tries to play generals lol\n'''\n\nimport logging\nfrom copy import deepcopy\nimport time\nimport json\nimport math\nfrom DataModels import TreeNode\nfrom collections import deque \nfrom queue import PriorityQueue\nfrom pprint import pprint,pformat\n\n\n\nclass PathMove(object):\n\tdef __init__(self, tile, next = None, prev = None, move_half = False):\n\t\tself.tile = tile\n\t\tself.next = next\n\t\tself.prev = prev\n\t\tself.move_half = move_half\n\n\tdef clone(self):\n\t\treturn PathMove(self.tile, self.next, self.prev)\n\n\tdef __str__(self):\n\t\treturn self.toString()\n\n\tdef toString(self):\n\t\tprevVal = \"[]\"\n\t\tif self.prev != None:\n\t\t\tprevVal = \"[{},{}]\".format(self.prev.tile.x, self.prev.tile.y)\n\t\tnextVal = \"[]\"\n\t\tif self.next != None:\n\t\t\tnextVal = \"[{},{}]\".format(self.next.tile.x, self.next.tile.y)\n\t\tmyVal = \"[{},{}]\".format(self.tile.x, self.tile.y)\n\n\t\tval = \"(prev:{} me:{} next:{})\".format(prevVal, myVal, nextVal)\n\t\treturn val\n\t#def __gt__(self, other):\n\t#\tif (other == None):\n\t#\t\treturn True\n\t#\treturn self.turn > other.turn\n\t#def __lt__(self, other):\n\t#\tif (other == None):\n\t#\t\treturn True\n\t#\treturn self.turn < other.turn\n\tdef __str__(self):\n\t\treturn self.toString()\n\n\nclass Path(object):\n\tdef __init__(self, value = 0):\n\t\tself.start = None\n\t\tself._pathQueue = deque()\n\t\tself.tail = None\n\t\tself._tileList = None\n\t\tself.value = value\n\tdef __gt__(self, other):\n\t\tif (other == None):\n\t\t\treturn True\n\t\treturn self.length > other.length\n\tdef __lt__(self, other):\n\t\tif (other == None):\n\t\t\treturn True\n\t\treturn self.length < other.length\n\n\t@property\n\tdef length(self):\n\t\treturn len(self._pathQueue) - 1\n\n\t@property\n\tdef tileSet(self):\n\t\treturn set(self.tileList)\n\t@tileSet.setter\n\tdef tileSet(self, value):\n\t\traise AssertionError(\"NO SETTING!\")\n\n\t@property\n\tdef tileList(self):\n\t\tif self._tileList == None:\n\t\t\tself._tileList = list()\n\t\t\tnode = self.start\n\t\t\twhile node != None:\n\t\t\t\tself._tileList.append(node.tile)\n\t\t\t\tnode = node.next\n\t\treturn list(self._tileList)\n\n\tdef add_next(self, nextTile):\n\t\tmove = PathMove(nextTile)\n\t\tmove.prev = self.tail\n\t\tif self.start == None:\n\t\t\tself.start = move\n\t\tif self.tail != None:\n\t\t\tself.tail.next = move\n\t\tif self._tileList != None:\n\t\t\tself._tileList.append(nextTile)\n\t\tself.tail = move\n\t\tself._pathQueue.append(move)\n\n\tdef add_start(self, startTile):\n\t\tmove = PathMove(startTile)\n\t\tif self.start != None:\n\t\t\tmove.next = self.start\n\t\t\tself.start.prev = move\n\t\tself.start = move\n\t\tif self._tileList != None:\n\t\t\tself._tileList.insert(0, 
startTile)\n\t\tself._pathQueue.appendleft(move)\n\n\tdef made_move(self):\n\t\tif len(self._pathQueue) == 0:\n\t\t\tlogging.info(\", bitch? Why you tryin to made_move when there aint no moves to made?\")\n\t\t\treturn\n\t\tif self._tileList != None:\n\t\t\tself._tileList.remove(self.start.tile)\n\t\tself.start = self.start.next\n\t\treturn self._pathQueue.popleft()\n\n\tdef remove_end(self):\n\t\tif len(self._pathQueue) == 0:\n\t\t\tlogging.info(\", bitch? Removing nothing??\")\n\t\t\treturn\n\t\tif self._tileList != None:\n\t\t\tself._tileList.remove(self.tail.tile)\n\t\tmove = self._pathQueue.pop()\n\t\tself.tail = self.tail.prev\n\t\tif self.tail != None:\n\t\t\tself.tail.next = None\n\t\treturn move\t\n\t\n\tdef convert_to_dist_dict(self):\n\t\tdist = 0\n\t\tdict = {}\n\t\tnode = self.start\n\t\twhile node != None:\n\t\t\tdict[node.tile] = dist\n\t\t\tnode = node.next\n\t\t\tdist += 1\n\t\treturn dict\n\n\tdef calculate_value(self, forPlayer):\n\t\tval = 0\n\t\tnode = self.start\n\t\ti = 0\n\t\twhile (node != None):\n\t\t\ttile = node.tile\n\t\t\tif tile.player == forPlayer:\n\t\t\t\tval += tile.army - 1\n\t\t\t\tif tile.isCity or tile.isGeneral:\n\t\t\t\t\tval += math.floor(i * 0.5)\n\t\t\telse:\n\t\t\t\tval -= tile.army + 1\n\t\t\t\tif tile.isCity or tile.isGeneral and tile.player != -1:\n\t\t\t\t\tval -= math.floor(i * 0.5)\n\t\t\tnode = node.next\n\t\t\ti += 1\n\t\tself.value = val\n\t\treturn val\n\n\tdef clone(self):\n\t\tnewPath = Path()\n\t\tnode = self.start\n\t\twhile node != None:\n\t\t\tnewPath.add_next(node.tile)\n\t\t\tnode = node.next\n\t\treturn newPath\n\n\tdef get_reversed(self):\n\t\tif (self.start == None or self.start.next == None):\n\t\t\treturn self.clone()\n\n\t\tnewPath = Path()\n\t\ttemp = self.tail\n\t\twhile temp != None:\n\t\t\tnewPath.add_next(temp.tile)\n\t\t\ttemp = temp.prev\n\t\tnewPath.value = self.value\n\t\treturn newPath\n\t\n\t# 10 things, want 3 end\n\tdef get_subsegment(self, count, end=False):\n\t\tnewPath = self.clone()\n\t\tlength = len(self._pathQueue)\n\t\ti = 0\n\t\twhile i < length - count:\n\t\t\ti += 1\n\t\t\tif end:\n\t\t\t\tnewPath.made_move()\n\t\t\telse:\n\t\t\t\tnewPath.remove_end()\n\t\treturn newPath\n\n\tdef __str__(self):\n\t\treturn self.toString()\n\n\tdef toString(self):\n\t\tval = \"[{} len {}] \".format(self.value, self.length)\n\t\tnode = self.start\n\t\twhile (node != None):\n\t\t\tval = val + str(node.tile.x) + \",\" + str(node.tile.y) + \" \"\n\t\t\tnode = node.next\n\t\treturn val\n\n\tdef convert_to_tree_nodes(self):\n\t\tcurTreeNode = None\n\t\tcurPathNode = self.start\n\t\tprevPathTile = None\n\t\tturn = 0\n\t\twhile curPathNode != None:\n\t\t\tprevTreeNode = curTreeNode\n\t\t\tcurTreeNode = TreeNode(curPathNode.tile, prevPathTile, turn)\n\t\t\tcurTreeNode.children.append(prevTreeNode)\n\t\t\tturn += 1\n\t\t\tprevPathTile = curPathNode.tile\n\t\t\tcurPathNode = curPathNode.next\n\t\treturn curTreeNode\n\n\t\n\t\t\n\t\n\ndef PathFromPathNode(pathEnd, path):\n\tif pathEnd == None or path == None:\n\t\treturn None\n\tpathLength = pathEnd.turn\n\tvalue = pathEnd.value\n\tcityCount = path.cityCount\n\tpathDict = pathEnd.pathDict\n\n\tnode = path\n\tturn = 0\n\t\n\tcurVal = node.tile.army\n\thead = PathMove(Move(node.tile, node.parent.tile), None, node.parent.value, turn)\n\t\n\tprev = head\n\tnode = node.parent\n\twhile (node.parent != None):\n\t\tturn += 1\n\t\tprev.next = PathMove(Move(node.tile, node.parent.tile), None, node.parent.value, turn)\n\t\tprev = prev.next\n\t\tnode = node.parent\n\t\n\treturn Path(head, 
pathLength, value, cityCount, pathDict)\n", "sub_path": "Path.py", "file_name": "Path.py", "file_ext": "py", "file_size_in_byte": 5803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.deque", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 117, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 126, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 155, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 159, "usage_type": "call"}, {"api_name": "DataModels.TreeNode", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "174287062", "text": "# coding=utf-8\n__author__ = 'Anatoli Kalysch'\nfrom copy import deepcopy\n\nfrom dynamic.TraceRepresentation import Traceline\nfrom lib.Register import get_reg_class\nfrom ui.PluginViewer import PluginViewer\nfrom ui.UIManager import QtGui, QtCore\n\n\n#############################\n### INPUT OUTPUT ANALYSIS ###\n#############################\nclass VMInputOuputViewer(PluginViewer):\n def __init__(self, input_set, output_set, output_ctx, title='Input/Output Analysis (legacy)'):\n # context should be a dictionary containing the backward traced result of each relevant register\n super(VMInputOuputViewer, self).__init__(title)\n self.input = input_set\n self.output = output_set\n self.ctx = output_ctx\n self.selection = {'upper':[], 'lower':[]}\n self.ucb_map = []\n self.lcb_map = []\n # brush map\n self.brush_map = {0:QtGui.QBrush(QtCore.Qt.white), # unselected values\n 1:QtGui.QBrush(QtGui.QColor(228,153,105)), # input values color\n 2:QtGui.QBrush(QtGui.QColor(183,166,173)), # output values color\n 3:QtGui.QBrush(QtGui.QColor(157,151,84))} # BOTH values, mix of both colors\n\n def PopulateModel(self):\n assert isinstance(self.ctx, dict)\n for key in self.ctx.keys():\n if get_reg_class(key) is not None:\n node = QtGui.QStandardItem('Register %s' % key)\n node_brush = set()\n for line in self.ctx[key]:\n assert isinstance(line, Traceline)\n tid = QtGui.QStandardItem('%s' % line.thread_id)\n addr = QtGui.QStandardItem('%x' % line.addr)\n disasm = QtGui.QStandardItem(line.disasm_str())\n comment = QtGui.QStandardItem(''.join(c for c in line.comment if line.comment is not None))\n context = QtGui.QStandardItem(''.join('%s:%s ' % (c, line.ctx[c]) for c in line.ctx.keys() if line.ctx is not None))\n ci = 0\n co = 0\n for selector in self.selection['upper']: # check input values\n if line.to_str_line().__contains__(selector) or line.to_str_line().__contains__(selector.lower()):\n ci = 1\n\n for selector in self.selection['lower']: # check output values\n if line.to_str_line().__contains__(selector) or line.to_str_line().__contains__(selector.lower()):\n co = 2\n\n node_brush.add(ci+co)\n tid.setBackground(self.brush_map[ci+co])\n addr.setBackground(self.brush_map[ci+co])\n disasm.setBackground(self.brush_map[ci+co])\n comment.setBackground(self.brush_map[ci+co])\n context.setBackground(self.brush_map[ci+co])\n\n node.appendRow([tid, addr, disasm, comment, context])\n try:\n if len(node_brush) == 3:\n color = 3\n else:\n color = max(node_brush)\n node.setBackground(self.brush_map[color])\n except:\n pass\n self.sim.appendRow(node)\n\n self.treeView.resizeColumnToContents(0)\n self.treeView.resizeColumnToContents(1)\n self.treeView.resizeColumnToContents(2)\n self.treeView.resizeColumnToContents(3)\n self.treeView.resizeColumnToContents(4)\n\n\n def PopulateUpperToolbar(self):\n assert 
isinstance(self.input, set)\n self.utb.addWidget(QtGui.QLabel('Input values found (check to highlight in trace): '))\n for value in self.input:\n self.ucb_map.append(QtGui.QCheckBox(value))\n self.ucb_map[-1].stateChanged.connect(lambda: self.OnValueChecked())\n self.utb.addWidget(self.ucb_map[-1])\n self.utb.addSeparator()\n\n def PopulateLowerToolbar(self):\n assert isinstance(self.input, set)\n self.ltb.addWidget(QtGui.QLabel('Output values found (check to highlight in trace): '))\n for value in self.output:\n self.lcb_map.append(QtGui.QCheckBox(value))\n self.lcb_map[-1].stateChanged.connect(lambda: self.OnValueChecked())\n self.ltb.addWidget(self.lcb_map[-1])\n self.ltb.addSeparator()\n\n def PopulateForm(self):\n ### init widgets\n # model\n self.sim = QtGui.QStandardItemModel()\n self.sim.setHorizontalHeaderLabels(['ThreadId', 'Address', 'Disasm', 'Stack Comment', 'CPU Context'])\n\n # toolbar\n self.utb = QtGui.QToolBar()\n self.ltb = QtGui.QToolBar()\n # tree view\n self.treeView = QtGui.QTreeView()\n self.treeView.setExpandsOnDoubleClick(True)\n self.treeView.setSortingEnabled(False)\n self.treeView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.treeView.setToolTip('Highlights:\\n Rust red - Input\\n Violet - Output\\n Olive - Both')\n\n ### populate widgets\n # fill model with data\n self.PopulateModel()\n # fill toolbar with data\n self.PopulateUpperToolbar()\n self.PopulateLowerToolbar()\n self.treeView.setModel(self.sim)\n # finalize layout\n layout = QtGui.QGridLayout()\n layout.addWidget(self.utb)\n layout.addWidget(self.treeView)\n layout.addWidget(self.ltb)\n\n self.parent.setLayout(layout)\n\n def CleanModel(self):\n self.sim.clear()\n self.sim.setHorizontalHeaderLabels(['ThreadId', 'Address', 'Disasm', 'Stack Comment', 'CPU Context'])\n\n def OnValueChecked(self):\n for check_box in self.ucb_map:\n if check_box.isChecked() and check_box.text() not in self.selection['upper']:\n self.selection['upper'].append(check_box.text())\n elif not check_box.isChecked() and check_box.text() in self.selection['upper']:\n self.selection['upper'].remove(check_box.text())\n\n for check_box in self.lcb_map:\n if check_box.isChecked() and check_box.text() not in self.selection['lower']:\n self.selection['lower'].append(check_box.text())\n elif not check_box.isChecked() and check_box.text() in self.selection['lower']:\n self.selection['lower'].remove(check_box.text())\n self.CleanModel()\n self.PopulateModel()\n\n def isVisible(self):\n try:\n return self.treeView.isVisible()\n except:\n return False\n", "sub_path": "ui/legacyUI/VMInputOutputViewer.py", "file_name": "VMInputOutputViewer.py", "file_ext": "py", "file_size_in_byte": 6602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "ui.PluginViewer.PluginViewer", "line_number": 14, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QBrush", "line_number": 25, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 25, "usage_type": "name"}, {"api_name": "ui.UIManager.QtCore.Qt", "line_number": 25, "usage_type": "attribute"}, {"api_name": "ui.UIManager.QtCore", "line_number": 25, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QBrush", "line_number": 26, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 26, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QColor", "line_number": 26, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui.QBrush", "line_number": 27, "usage_type": "call"}, 
{"api_name": "ui.UIManager.QtGui", "line_number": 27, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QColor", "line_number": 27, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui.QBrush", "line_number": 28, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 28, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QColor", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.Register.get_reg_class", "line_number": 33, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 34, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 34, "usage_type": "name"}, {"api_name": "dynamic.TraceRepresentation.Traceline", "line_number": 37, "usage_type": "argument"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 38, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 38, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 39, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 39, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 40, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 40, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 41, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 41, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QStandardItem", "line_number": 42, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 42, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QLabel", "line_number": 80, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 80, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QCheckBox", "line_number": 82, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 82, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QLabel", "line_number": 89, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 89, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QCheckBox", "line_number": 91, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 91, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QStandardItemModel", "line_number": 99, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 99, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QToolBar", "line_number": 103, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 103, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QToolBar", "line_number": 104, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 104, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QTreeView", "line_number": 106, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 106, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QAbstractItemView", "line_number": 109, "usage_type": "attribute"}, {"api_name": "ui.UIManager.QtGui", "line_number": 109, "usage_type": "name"}, {"api_name": "ui.UIManager.QtGui.QGridLayout", "line_number": 120, "usage_type": "call"}, {"api_name": "ui.UIManager.QtGui", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "627832213", "text": "from parse import compile\nfrom copy import copy, deepcopy\nfrom collections import defaultdict, Counter, deque\nfrom blist import *\nimport itertools\nimport math\nimport sys\n# sys.setrecursionlimit(10000)\n\ns = ''\ntiles = {}\nwhile s != 'done':\n s 
= input()\n tile = int(s.strip(':').split(' ')[1])\n s = input()\n grid = []\n while s != '' and s != 'done':\n grid.append(list(s))\n s = input()\n tiles[tile] = grid\n\ndef rotate3(grid):\n return rotate1(grid, 3)\n \ndef rotate2(grid):\n return rotate1(grid, 2)\n \ndef rotate1(cgrid, times=1):\n if times > 1:\n grid = rotate1(cgrid, times - 1)\n else:\n grid = cgrid\n n,m = len(grid), len(grid[0])\n newgrid = deepcopy(grid)\n for i in range(n):\n if i > 0 and i < n-1:\n iters = [0, m-1]\n else:\n iters = range(m)\n for j in iters:\n c = grid[i][j]\n newi = j\n newj = m - i - 1\n newgrid[newi][newj] = c \n return newgrid\n \ndef flip_vert(grid):\n n,m = len(grid), len(grid[0])\n newgrid = deepcopy(grid)\n for i in range(n):\n if i > 0 and i < n-1:\n iters = [0, m-1]\n else:\n iters = range(m)\n for j in iters:\n c = grid[i][j]\n newi = n - i - 1\n newj = j\n newgrid[newi][newj] = c \n return newgrid\n \ndef flip_hor(grid):\n n,m = len(grid), len(grid[0])\n newgrid = deepcopy(grid)\n for i in range(n):\n if i > 0 and i < n-1:\n iters = [0, m-1]\n else:\n iters = range(m)\n for j in iters:\n c = grid[i][j]\n newi = i\n newj = m - j - 1\n newgrid[newi][newj] = c \n return newgrid\n \ndef flip_both(grid):\n return flip_hor(flip_vert(grid))\n \nrots = [\n ('rot1', rotate1),\n ('rot2', rotate2),\n ('rot3', rotate3),\n ('rot_none', lambda x: x)]\n \nflips = [('fliph', flip_hor), ('flipv', flip_vert), ('flipb', flip_both),\n('flip_none', lambda x: x)]\n \ndef right_edge(grid):\n str = ''\n n,m = len(grid), len(grid[0])\n for i in range(n):\n str += grid[i][m-1]\n return str\n \ndef left_edge(grid):\n str = ''\n n,m = len(grid), len(grid[0])\n for i in range(n):\n str += grid[i][0]\n return str\n \ndef top_edge(grid):\n str = ''\n n,m = len(grid), len(grid[0])\n for j in range(m):\n str += grid[0][j]\n return str\n \ndef bottom_edge(grid):\n str = ''\n n,m = len(grid), len(grid[0])\n for j in range(m):\n str += grid[n - 1][j]\n return str\n \nedges = [('right', right_edge, left_edge), \n('left', left_edge, right_edge), ('top', top_edge, bottom_edge),\n('bottom', bottom_edge, top_edge)]\n\ndef match_edge(g1, g2):\n # determine which edges of g1, g2 matches to\n matches = []\n for (e, f1, f2) in edges:\n if f1(g1) == f2(g2):\n matches.append(e)\n return matches\n\n# in is 12x12\npremade_grids = {}\nadj = defaultdict(list)\nfor kk, t1 in enumerate(tiles):\n g1 = tiles[t1]\n print('Checking tile {}: {} of {}'.format(t1, kk + 1, len(tiles)))\n for t2 in tiles:\n if t2 == t1:\n continue\n g2 = tiles[t2]\n for (r1, r1func) in rots:\n for (f1, f1func) in flips:\n g1op = '{},{}'.format(r1,f1)\n if (t1, g1op) in premade_grids:\n newg1 = premade_grids[(t1, g1op)]\n else:\n newg1 = r1func(f1func(g1))\n premade_grids[(t1, g1op)] = newg1\n for (r, rfunc) in rots:\n for (f, ffunc) in flips:\n g2op = '{},{}'.format(r,f)\n if (t2, g2op) in premade_grids:\n newg2 = premade_grids[(t2, g2op)]\n else:\n newg2 = rfunc(ffunc(g2))\n premade_grids[(t2, g2op)] = newg2\n matches = match_edge(newg1, newg2)\n for m in matches:\n adj[(t1, g1op)].append(\n ((t2, g2op), m)\n )\n\ntile_matches = defaultdict(lambda: 0)\nfor t in tiles:\n for k in adj:\n (t1, op) = k\n if t == t1 and len(adj[k]) > 0:\n tile_matches[t] += 1 \n \n\ndef side_move(i, j, side):\n if side == 'top':\n return (i -1, j)\n if side == 'bottom':\n return (i+1, j)\n if side == 'right':\n return (i, j + 1)\n if side == 'left':\n return (i, j - 1)\n print('somethings wrong')\n exit()\n \n\n \ndef valid(i, j, grid):\n if i >= 0 and i < len(grid):\n if j >= 0 and 
j < len(grid[i]):\n return True\n return False\n \ndef fits(t, op, i, j, grid, adj):\n sides = ['bottom', 'top', 'left', 'right']\n for side in sides:\n i2,j2 = side_move(i, j, side)\n if valid(i2,j2,grid):\n if grid[i2][j2] is not None:\n t2,op2 = grid[i2][j2]\n if ((t2,op2), side) not in adj[(t,op)]:\n return False\n return True\n \ndef filled_in(grid):\n for row in grid:\n for c in row:\n if c is None:\n return False\n return True\n \ndef in_grid(t, grid):\n for row in grid:\n for c in row:\n if c is not None and t == c[0]:\n return True\n return False\n\ndef solve(t1, op1, i1, j1, grid, adj, depth=1, used=set()):\n print('${}checking {} {} at {},{}'.format(' '*(depth-2), t1,op1,i1,j1))\n if (t1,op1) in used:\n print('{}already used!'.format(' '*(depth-1)))\n return None\n if not fits(t1,op1,i1,j1,grid,adj):\n print('{}not fits! \\n{}'.format(' '*(depth-1),grid))\n return None\n # else:\n # print('{}fits!'.format(' '*(depth-1)))\n\n grid[i1][j1] = (t1, op1)\n used.add(t1)\n if depth -1 >= len(grid) * len(grid[0]):\n print('{}depth {} >= {}'.format(' '*(depth-1),depth, len(grid) * len(grid[0])))\n # if filled_in(grid):\n return grid\n # else:\n # used.remove(t1)\n # grid[i1][j1] = None\n # return None\n \n edges = adj[(t1, op1)]\n for ((t2, op2), side) in edges:\n if t1 == 3079 and op1 == 'rot_none,flip_none':\n print(t2,op2,side)\n if in_grid(t2,grid):\n if t1 == 3079 and op1 == 'rot_none,flip_none':\n print('was used')\n for g in grid:\n print(g)\n continue\n i2,j2 = side_move(i1,j1,side)\n if valid(i2,j2,grid):\n if t1 == 3079 and op1 == 'rot_none,flip_none':\n print('invalid move to', i2,j2)\n ret = solve(t2,op2,i2,j2,grid,adj,depth+1,used)\n if ret is not None:\n return ret\n\n used.remove(t1)\n grid[i1][j1] = None\n return None\n \nstart_t = None\nfor k in tiles:\n start_t = k\n break\n\nsidess = int(math.sqrt(len(tiles)))\nmeta_grid = [[None for i in range(sidess)] for j in range(sidess)]\nt1_ops = []\nfor k in adj:\n t,op = k\n if t == start_t:\n t1_ops.append(op)\n \ndef four_corners(grid):\n n,m = len(grid), len(grid[0])\n a,b,c,d = grid[0][0][0], grid[0][m-1][0], grid[n-1][m-1][0], grid[n-1][0][0]\n return (a*b*c*d, [a,b,c,d])\n \nfor i in range(sidess):\n for j in range(sidess):\n for op in t1_ops:\n m = deepcopy(meta_grid)\n ret = solve(start_t, op, i, j, m, adj)\n if ret is None:\n continue\n else:\n print(ret)\n print(four_corners(ret))\n exit()\n\n \n \n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n # hello", "sub_path": "2020/day20/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 7783, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "copy.deepcopy", "line_number": 34, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 49, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 64, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 131, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 161, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 263, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 279, "usage_type": "call"}]} +{"seq_id": "489251595", "text": "import os\nimport sys\nimport numpy as np\nimport astropy\nfrom astropy.table import Table, Row, hstack\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy.wcs.utils import pixel_to_skycoord\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nimport seaborn as 
sns\nimport turtle_utils\nfrom turtle_utils import (\n slit_profile,\n extract_full_profile_from_pv,\n extract_slit_profile_from_imslit,\n get_orig_folder,\n find_slit_coords,\n subtract_sky_and_trim,\n make_three_plots,\n extract_line_and_regularize,\n make_slit_wcs,\n)\n\ntry:\n choice = int(sys.argv[1])\nexcept:\n choice = None\n\nrestwavs = {'ha': 6562.79, 'nii': 6583.45, 'nii_s': 6548.05}\n\n# Position of star\nRA0, Dec0 = 251.122998321, 23.7998586853\n\nsaturation = 6e4\n\n\nsns.set_palette('RdPu_d', 3)\n\ntable1 = Table.read('data/ha-slits.tab', format=\"ascii.tab\")\ntable2 = Table.read('data/align-ha.tab', format=\"ascii.tab\")\n# The align-ha table takes precedence if islit has been modified\ntable1.remove_column(\"islit\")\n# We already have spec in the ha-slits table\ntable2.remove_column(\"spec\") \ntable = hstack([table1, table2], join_type=\"exact\")\n\n# Photometric reference image\nphotom, = fits.open('data/imslit-ha/imslit-median.fits')\nwphot = WCS(photom.header)\nturtle_utils.VERBOSE = 1\nneighbors = [-2, -1, 1, 2]\nfor row in table:\n if choice is not None and row[\"id\"] != choice:\n # If we asked for a single spectrum, then skip all others\n continue\n spec_hdu, = fits.open(get_orig_folder(row[\"run\"]) + \"/\" + row[\"spec\"] + \".fits\")\n im_hdu, = fits.open(\"data/imslit/\" + row[\"imslit\"] + \"-wcs.fits\")\n # Mask out saturated pixels with NaN\n spec_hdu.data[spec_hdu.data > saturation] = np.nan\n # trim the edge or arrays since sometimes the outer pixels contain garbage\n spec_hdu.data = subtract_sky_and_trim(spec_hdu.data, row)\n spec_profile = extract_full_profile_from_pv(\n spec_hdu,\n wavaxis=row[\"wa\"],\n bandwidth=90.0,\n linedict=restwavs)\n imslit_profile = extract_slit_profile_from_imslit(im_hdu.data, row)\n print(row)\n jslit = np.arange(len(spec_profile))\n # jslit0_spec = np.average(jslit, weights=spec_profile)\n # jslit0_imslit = np.average(jslit, weights=imslit_profile)\n # jslit0_spec = np.nanargmax(spec_profile)\n # jslit0_imslit = np.nanargmax(imslit_profile)\n jslit0_spec = row[\"j0_s\"]\n jslit0_imslit = row[\"j0_i\"]\n print(jslit0_spec, jslit0_imslit, 'shift =', row[\"shift\"])\n slit_coords = find_slit_coords(row, im_hdu.header, spec_hdu.header)\n calib_profile = slit_profile(slit_coords['RA'], slit_coords['Dec'],\n photom.data, wphot)\n\n\n # Look at neighboring slit positions\n nb_calib_profiles = {}\n for nb in neighbors:\n nbrow = Table(row)[0] # This is the trick to get a copy of the row\n nbrow[\"islit\"] += nb\n nb_slit_coords = find_slit_coords(nbrow, im_hdu.header, spec_hdu.header)\n nb_calib_profiles[nb] = slit_profile(\n nb_slit_coords['RA'], nb_slit_coords['Dec'], photom.data, wphot)\n\n\n # Offset in arcsec along the slit\n slit_points = (np.arange(len(spec_profile)) - jslit0_spec)*slit_coords[\"ds\"]\n # Extra correction for optical halos that show up at +/- 40 arcsec\n halo_mask = np.abs(np.abs(slit_points) - 40.0) < 10.0\n halo_correction = np.median(spec_profile[halo_mask])\n spec_profile -= halo_correction\n\n # Take a window about profile peak to normalize spec_profile\n jslice0 = slice(jslit0_spec-20, jslit0_spec+20)\n # propagate saturated pixels to the calibration profile\n calib_profile_nan = calib_profile.copy()\n calib_profile_nan[~np.isfinite(spec_profile)] = np.nan\n rat0 = np.nansum(spec_profile[jslice0])/np.nansum(calib_profile_nan[jslice0])\n print('Coarse calibration: ratio =', rat0)\n spec_profile /= rat0\n\n\n # Make a figure comparing the profiles\n plt_prefix = f\"figs/{row.index:03d}-calib\"\n ratio = 
make_three_plots(spec_profile, calib_profile, plt_prefix,\n slit_points=slit_points,\n neighbors=nb_calib_profiles, db=row, sdb=slit_coords)\n\n # Write out the flux-calibrated spectra\n spec_hdu.data -= halo_correction\n spec_hdu.data /= rat0\n save_prefix = f\"data/pvextract/{row.index:03d}-{row['spec']}\"\n # The default header has minimal changes from the original\n pvheader = fits.Header(spec_hdu.header, copy=True)\n\n for lineid, wav0 in restwavs.items():\n pvdata, contdata, wavs = extract_line_and_regularize(\n spec_hdu.data, WCS(spec_hdu.header), wav0, row)\n pvdata = pvdata[None, :, :]\n contdata = contdata[None, :, :]\n\n # Create a fancy WCS object for slit coordinates (and a simple one too)\n wslit, wsimp = make_slit_wcs(row, slit_coords, wavs, jslit0_spec)\n # Set the rest wavelength for this line\n wslit.wcs.restwav = (wav0*u.Angstrom).to(u.m).value\n pvheader.update(wsimp.to_header())\n pvheader.update(wslit.to_header(key='A'))\n pvheader['WEIGHT'] = rat0\n\n pvfile = f\"{save_prefix}-{lineid}.fits\"\n fits.PrimaryHDU(header=pvheader,\n data=pvdata).writeto(pvfile, overwrite=True)\n fits.PrimaryHDU(header=pvheader,\n data=contdata).writeto(pvfile.replace(\".fits\",\n \"-cont.fits\"),\n overwrite=True)\n", "sub_path": "scripts/compare-slit-profiles.py", "file_name": "compare-slit-profiles.py", "file_ext": "py", "file_size_in_byte": 5506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "seaborn.set_palette", "line_number": 38, "usage_type": "call"}, {"api_name": "astropy.table.Table.read", "line_number": 40, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 40, "usage_type": "name"}, {"api_name": "astropy.table.Table.read", "line_number": 41, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 41, "usage_type": "name"}, {"api_name": "astropy.table.hstack", "line_number": 46, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 49, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 49, "usage_type": "name"}, {"api_name": "astropy.wcs.WCS", "line_number": 50, "usage_type": "call"}, {"api_name": "turtle_utils.VERBOSE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 57, "usage_type": "name"}, {"api_name": "turtle_utils.get_orig_folder", "line_number": 57, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 58, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 60, "usage_type": "attribute"}, {"api_name": "turtle_utils.subtract_sky_and_trim", "line_number": 62, "usage_type": "call"}, {"api_name": "turtle_utils.extract_full_profile_from_pv", "line_number": 63, "usage_type": "call"}, {"api_name": "turtle_utils.extract_slit_profile_from_imslit", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "turtle_utils.find_slit_coords", "line_number": 78, "usage_type": "call"}, {"api_name": "turtle_utils.slit_profile", "line_number": 79, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 86, "usage_type": "call"}, {"api_name": "turtle_utils.find_slit_coords", "line_number": 88, "usage_type": "call"}, {"api_name": 
"turtle_utils.slit_profile", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.nansum", "line_number": 105, "usage_type": "call"}, {"api_name": "turtle_utils.make_three_plots", "line_number": 112, "usage_type": "call"}, {"api_name": "astropy.io.fits.Header", "line_number": 121, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 121, "usage_type": "name"}, {"api_name": "turtle_utils.extract_line_and_regularize", "line_number": 124, "usage_type": "call"}, {"api_name": "astropy.wcs.WCS", "line_number": 125, "usage_type": "call"}, {"api_name": "turtle_utils.make_slit_wcs", "line_number": 130, "usage_type": "call"}, {"api_name": "astropy.units.Angstrom", "line_number": 132, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 132, "usage_type": "name"}, {"api_name": "astropy.units.m", "line_number": 132, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 138, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 138, "usage_type": "name"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 140, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "432127606", "text": "# Quick script to replace all AAS macros in a bibtex file\nimport os.path\nfrom sphinx.errors import ExtensionError\n\naas_macros_dict= {\n '\\\\apjsupp' : 'Astrophys. J. Supp.',\n '\\\\apjs' : 'Astrophys. J. Supp.',\n '\\\\appjlett': 'Astrophys. J. Lett.',\n '\\\\appjl' : 'Astrophys. J. Lett.',\n '\\\\apj' : 'Astrophys. J.',\n '\\\\aj' : 'Astron. J.',\n '\\\\mnras' : 'Mon. Not. Roy. Astron. Soc.',\n '\\\\baas' : 'Bull. AAS',\n '\\\\bain' : 'Bull. Astron. Inst. Netherlands',\n '\\\\aapr' : 'Astron. & Astrophys. Rev.',\n '\\\\aaps' : 'Astron. & Astrophys. Supp.',\n '\\\\astap' : 'Astron. & Astrophys.',\n '\\\\aap' : 'Astron. & Astrophys.',\n '\\\\araa' : 'Ann. Rev. Astron. Astrophys.',\n '\\\\actaa' : 'Acta Astronomica',\n '\\\\apss' : 'Astrophys. & Space Sci.',\n '\\\\jcap' : 'J. Cosmo & Astropart. Phys.',\n '\\\\nat' : 'Nature',\n '\\\\nar' : 'New Astron. Rev.',\n '\\\\na' : 'New Astron.',\n '\\\\pra' : 'Phys. Rev. A',\n '\\\\prb' : 'Phys. Rev. B',\n '\\\\prc' : 'Phys. Rev. C',\n '\\\\prd' : 'Phys. Rev. D',\n '\\\\pre' : 'Phys. Rev. E',\n '\\\\prl' : 'Phys. Rev. Lett.',\n '\\\\pasa' : 'Pub. Astron. Soc. Aus.',\n '\\\\pasp' : 'Pub. Astron. Soc. Pac.',\n '\\\\pasj' : 'Pub. Astron. Soc. Japan',\n '\\\\rmxaa' : 'Rev. Mex. Astron. & Astrofys.',\n '\\\\ssr' : 'Space Sci. Rev.',\n '\\\\applopt' : 'Appl. Opt.',\n '\\\\ao' : 'Appl. Opt.',\n '\\\\azh' : 'Astron. Zhu.',\n '\\\\bac' : 'Bull. Astron. Czech.',\n '\\\\caa' : 'Chin. Astron. Astrophys.',\n '\\\\cjaa' : 'Chin. J. Astron. Astrophys.',\n '\\\\icarus' : 'Icarus',\n '\\\\jrasc' : 'J. RAS Can.',\n '\\\\memras' : 'Mem. RAS',\n '\\\\qjras' : 'Quat. J. RAS',\n '\\\\skytel' : 'Sky & Telescope',\n '\\\\solphys' : 'Sol. Phys.',\n '\\\\sovast' : 'Sov. Astron.',\n '\\\\zap' : 'ZeitSch. Astrophys.',\n '\\\\iaucirc' : 'IAU Circs.',\n '\\\\aplett' : 'Astrophys. Lett.',\n '\\\\apspr' : 'Astrophys. Space Phys. Res.',\n '\\\\fcp' : 'Fund. Cosm. Phys.',\n '\\\\gca' : 'Geochim. 
Cosmochim. Acta',\n '\\\\grl' : 'Geophys. Res. Lett',\n '\\\\jcp' : 'J. Chem. Phys.',\n '\\\\jgr' : 'J. Geophys. Res.',\n '\\\\jqsrt' : 'J. Quant. Spec. Rad. Trans.',\n '\\\\memsai' : 'Mem. Soc. Astron. Ital.',\n '\\\\nphysa' : 'Nucl. Phys. A',\n '\\\\physrep' : 'Phys. Rep.',\n '\\\\physscr' : 'Phys. Scrip.',\n '\\\\planss' : 'Plan. Space. Sci.',\n '\\\\procspie': 'Proc. SPIE'\n }\n\ndef resolve(app,env,docnames):\n if not app.config.astrorefs_resolve_aas_macros:\n return\n if app.config.astrorefs_resolve_aas_macros_infile is None \\\n or app.config.astrorefs_resolve_aas_macros_outfile is None:\n raise ExtensionError('sphinx-astrorefs: when resolving AAS macros, need to give original and target bib file name as \"astrorefs_resolve_aas_macros_infile\" and \"astrorefs_resolve_aas_macros_outfile\"')\n with open(os.path.join(env.srcdir,\n app.config.astrorefs_resolve_aas_macros_infile),'r') \\\n as infile:\n with open(os.path.join(env.srcdir,\n app.config.astrorefs_resolve_aas_macros_outfile),'w') \\\n as outfile:\n for line in infile:\n for key in aas_macros_dict.keys():\n line= line.replace(key,aas_macros_dict[key])\n outfile.write(line)\n", "sub_path": "sphinx_astrorefs/resolve_aas.py", "file_name": "resolve_aas.py", "file_ext": "py", "file_size_in_byte": 3412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sphinx.errors.ExtensionError", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 74, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "287881535", "text": "import networkx as nx\nimport numpy as np\nimport pandas as pd\n\nfrom ..util import memoize\nfrom ..visualize.color import red\n\n\nclass Networker(object):\n \"\"\"Networks (the kind with nodes and edges) \"\"\"\n weight_funs = ['no_weight', 'sq', 'arctan', 'arctan_sq']\n\n def __init__(self):\n # self.adjacencies_ = defaultdict()\n # self.graphs_ = defaultdict()\n self._default_node_color_mapper = lambda x: red\n self._default_node_size_mapper = lambda x: 300\n # self._last_adjacency_accessed = None\n # self._last_graph_accessed = None\n\n def get_weight_fun(self, fun_name='no_weight'):\n \"\"\" return a function that performs a common transform on distance \"\"\"\n _noweight = lambda x: x\n _sq = lambda x: x ** 2\n _arctan = lambda x: np.arctan(x)\n _arctan_sq = lambda x: np.arctan(x) ** 2\n if fun_name == 'no_weight':\n wt = _noweight\n elif fun_name == 'sq':\n wt = _sq\n elif fun_name == 'arctan':\n wt = _arctan\n elif fun_name == 'arctan_sq':\n wt = _arctan_sq\n else:\n raise ValueError\n return wt\n\n @memoize\n def adjacency(self, data, use_pc_1=True, use_pc_2=True,\n use_pc_3=True, use_pc_4=True, n_pcs=5):\n total_pcs = data.shape[1]\n use_cols = np.ones(total_pcs, dtype='bool')\n use_cols[n_pcs:] = False\n use_cols = use_cols * np.array(\n [use_pc_1, use_pc_2, use_pc_3, use_pc_4] + [True, ] * (\n total_pcs - 4))\n selected_cols = data.loc[:, use_cols]\n cov = np.cov(selected_cols)\n nrow, ncol = selected_cols.shape\n return pd.DataFrame(np.tril(cov * - (np.identity(nrow) - 1)),\n index=selected_cols.index, columns=data.index)\n\n @memoize\n def graph(self, adjacency, 
cov_cut=None, name=None,\n node_color_mapper=None,\n node_size_mapper=None,\n degree_cut=2,\n weight_function='no_weight'):\n\n if node_color_mapper is None:\n node_color_mapper = self._default_node_color_mapper\n if node_size_mapper is None:\n node_size_mapper = self._default_node_size_mapper\n\n weight = self.get_weight_fun(weight_function)\n graph = nx.Graph()\n for node_label in adjacency.index:\n node_color = node_color_mapper(node_label)\n node_size = node_size_mapper(node_label)\n graph.add_node(node_label, node_size=node_size,\n node_color=node_color)\n for cell1, others in adjacency.iterrows():\n for cell2, value in others.iteritems():\n if value > cov_cut:\n # cast to floats because write_gml doesn't like numpy\n # dtypes\n graph.add_edge(cell1, cell2, weight=float(weight(value)),\n inv_weight=float(1 / weight(value)),\n alpha=0.05)\n\n graph.remove_nodes_from(\n [k for k, v in graph.degree().iteritems() if v <= degree_cut])\n\n positions = nx.spring_layout(graph)\n\n return graph, positions\n", "sub_path": "flotilla/compute/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 3206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "visualize.color.red", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.arctan", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.tril", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 51, "usage_type": "call"}, {"api_name": "util.memoize", "line_number": 39, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 67, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 85, "usage_type": "call"}, {"api_name": "util.memoize", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "400669298", "text": "import pygame\nfrom pygame import *\nfrom time import sleep, time\nimport random\n\nballer = [0 for i in range(6)]\nGreen = (100, 250, 100)\npad_width = 1024\npad_height = 512\nplayer_x = 0\nplayer_y = 0\nRED = (255, 0, 0)\nscore = [0, 0]\n\nx = [0 for i in range(6)]\ny = [0 for i in range(6)]\nball_x = 0\nball_y = 0\n\ndef tackle(player_x, player_y):\n global ball_x, ball_y\n if abs(ball_x - player_x) <= 40 and abs(ball_y - player_y) <= 40:\n able = random.randrange(0, 6)\n if able >= 4:\n ball_y = player_y\n ball_x = player_x\n ball_move(ball_x, ball_y)\n return\n\ndef ball_move(x, y):\n global image_ball, ball_x, ball_y\n pygame.mixer.Sound.play(shoot_sound)\n ball_x = x\n ball_y = y\n gamepad.blit(image_ball, (x, y))\n\n\ndef ball_direction(player_x, player_y, ball_x, ball_y):\n if player_x - ball_x > 0:\n if player_y - ball_y > 0:\n return 'Ball LeftUp'\n if player_y - ball_y == 0:\n return 'Ball Left'\n if player_y - ball_y < 0:\n return 'Ball LeftDown'\n if player_x - ball_x == 0:\n if player_y - ball_y > 0:\n return 'Ball Up'\n if player_y - ball_y == 0:\n return 'Ball Stop'\n if player_y - ball_y < 0:\n return 'Ball Down'\n if player_x - ball_x < 0:\n if player_y - ball_y > 0:\n return 'Ball RightUp'\n if player_y - ball_y == 0:\n return 'Ball Right'\n if player_y - ball_y < 0:\n return 'Ball 
RightDown'\n\n\ndef ball_pass(player_x, player_y, dir, shoot_able):\n print('shootable = ', shoot_able)\n if shoot_able != 0:\n if dir == 'Ball Left':\n for i in range(1,30):\n ball_move(player_x-12*i, player_y)\n pygame.display.update()\n elif dir == 'Ball Right':\n for i in range(1,30):\n ball_move(player_x+12*i, player_y)\n pygame.display.update()\n elif dir == 'Ball Up':\n for i in range(1,30):\n ball_move(player_x, player_y-12*i)\n pygame.display.update()\n elif dir == 'Ball Down':\n for i in range(1,30):\n ball_move(player_x, player_y+12*i)\n pygame.display.update()\n elif dir == 'Ball LeftUp':\n for i in range(1,30):\n ball_move(player_x-12*i, player_y-12*i)\n pygame.display.update()\n elif dir == 'Ball RightUp':\n for i in range(1,30):\n ball_move(player_x+12*i, player_y-12*i)\n pygame.display.update()\n elif dir == 'Ball LeftDown':\n for i in range(1,30):\n ball_move(player_x-12*i, player_y+12*i)\n pygame.display.update()\n elif dir == 'Ball RightDown':\n for i in range(1,30):\n ball_move(player_x+12*i, player_y+12*i)\n pygame.display.update()\n\n\ndef dispMessage(text):\n global gamepad\n textfont = pygame.font.Font('freesansbold.ttf', 80)\n text = textfont.render(text, True, RED)\n textpos = text.get_rect()\n textpos.center = (pad_width / 2, pad_height / 2)\n gamepad.blit(text, textpos)\n pygame.display.update()\n sleep(2)\n\n\ndef goalchcker():\n global ball_x, ball_y, score\n if ((0 < ball_x < 75) or (949< ball_x < 1024)) and (206 < ball_y < 306):\n print('goal')\n pygame.mixer.music.fadeout(2000)\n dispMessage('Goal!!')\n if 0 < ball_x < 75:\n score[0] += 1\n else:\n score[1] += 1\n print(score)\n return True\n elif ((0 < ball_x < 120) or (904< ball_x < 1024)) and (106 < ball_y < 406):\n pass\n #보통 골대 가까이 가면 소리 커지거나 군중 소리 나던데...\n\n\ndef ballchecker(shoot, xchange, ychange):\n global x, y, ball_x, ball_y, baller\n for i in range(6):\n baller[i] = 0\n for i in range(6):\n if (abs(ball_x - x[i]) <= 20 and abs(ball_y - y[i]) <= 20) and (shoot == 0):\n ball_x = x[i]\n ball_y = y[i]\n elif(abs(ball_x - x[i]) <= 20 and abs(ball_y - y[i]) <= 20) and (shoot == 1):\n ball_x = x[i] + 2 * xchange\n ball_y = y[i] + 2 * ychange\n if abs(ball_x - x[i]) <= 30 and abs(ball_y - y[i]) <= 30:\n baller[i] = 1\n\n\ndef show_dis_play():\n global x, y, ball_x, ball_y, gamepad, image, image_ball, start_time\n\n for i in range(6):\n gamepad.blit(image[i], (x[i], y[i]))\n gamepad.blit(image_ball, (ball_x, ball_y))\n\n delta_time = int(time() - start_time)\n\n textfont = pygame.font.Font('freesansbold.ttf', 20)\n text = textfont.render('Time %02d : %02d Score %d : %d' % (delta_time // 60, delta_time % 60, score[0], score[1]), True, RED)\n textpos = text.get_rect()\n textpos.center = (pad_width / 2, 20)\n gamepad.blit(text, textpos)\n pygame.display.update()\n\n\ndef runGame():\n global x, y, ball_x, ball_y, gamepad, baller, image_map, score, start_time\n\n start_time = time()\n\n while True:\n pygame.init()\n pygame.mixer.music.set_volume(0.3)\n pygame.mixer.Sound.play(start_sound)\n pygame.mixer.music.load('bgm.wav')\n pygame.mixer.music.play(-1)\n pygame.mixer.music.set_volume(5.0)\n\n shooter_x = 0\n shooter_y = 0\n x = [410, 205, 205, 615, 820, 820]\n y = [256, 128, 384, 256, 128, 384]\n ball_x = 512\n ball_y = 256\n x_change = [0 for i in range(6)]\n y_change = [0 for i in range(6)]\n ball_x_change = 0\n ball_y_change = 0\n\n gamepad.blit(image_map, (0, 0))\n show_dis_play()\n pygame.display.update()\n dispMessage('%d : %d' % (score[0], score[1]))\n\n while True:\n for event in 
pygame.event.get():\n shoot_able = 0\n ballchecker(0, 0, 0)\n for i in range(6):\n if baller[i] == 1:\n print('baller now=', i)\n shoot_able = 1\n shooter_x = x[i]\n shooter_y = y[i]\n\n dir = ball_direction(shooter_x, shooter_y, ball_x, ball_y)\n print(dir)\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n key = [[K_2, K_q, K_w, K_e], [K_5, K_r, K_t, K_y], [K_8, K_u, K_i, K_o], [K_s, K_z, K_x, K_c],\n [K_g, K_v, K_b, K_n], [K_UP, K_LEFT, K_DOWN, K_RIGHT]]\n\n if event.type == pygame.KEYDOWN:\n for i in range(6):\n player = key[i]\n for j in range(4):\n direction = player[j]\n if event.key == direction:\n if j is 0:\n y_change[i] = -5\n ball_y_change = -10\n if j is 1:\n x_change[i] = -5\n ball_x_change = -10\n if j is 2:\n y_change[i] = 5\n ball_y_change = 10\n if j is 3:\n x_change[i] = 5\n ball_x_change = 10\n\n if event.type == pygame.KEYUP:\n for i in range(6):\n player = key[i]\n for j in range(4):\n direction = player[j]\n if event.key == direction:\n if j % 2:\n x_change[i] = 0\n ball_x_change = 0\n else:\n y_change[i] = 0\n ball_y_change = 0\n\n shoot = 0\n if event.type == pygame.KEYDOWN:\n if event.key in [K_SPACE, K_3, K_6, K_9, K_d, K_h]:\n print('Ball Shoot')\n print(i)\n ball_pass(ball_x, ball_y, dir, shoot_able)\n shoot = 1\n\n key_tackle = [K_1, K_4, K_7, K_a, K_f, K_LCTRL]\n if event.type == pygame.KEYDOWN:\n for i in range(0, 6):\n who = key_tackle[i]\n if event.key == who:\n print(\"Ball Tackle\")\n print(i)\n tackle(x[i], y[i])\n\n for i in range(6):\n x[i] += x_change[i]\n\n if x[i] < 0:\n x[i] = 0\n if x[i] > 1014:\n x[i] = 1014\n\n y[i] += y_change[i]\n\n if y[i] < 10:\n y[i] = 10\n if y[i] > 502:\n y[i] = 502\n\n for i in range(6):\n if baller[i] == 1:\n ball_x += ball_x_change\n ball_y += ball_y_change\n if ball_x < 10:\n ball_x = 10\n if ball_x > 1014:\n ball_x = 1014\n if ball_y < 10:\n ball_y = 10\n if ball_y > 502:\n ball_y = 502\n print('ball position=', ball_x, ball_y)\n ballchecker(shoot, ball_x_change, ball_y_change)\n if goalchcker():\n break\n gamepad.blit(image_map, (0, 0))\n show_dis_play()\n pygame.display.update()\n clock.tick(60)\n\ndef intro_screen():\n image_intro = pygame.image.load('intro.png')\n gamepad.blit(image_intro, (0, 0))\n pygame.display.update()\n pygame.mixer.music.load('intro.wav')\n pygame.mixer.music.play(-1)\n crashed = False\n while not crashed:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n crashed = True\n break\n if event.type == pygame.KEYDOWN:\n if event.key == K_SPACE:\n runGame()\n return\n\ndef initGame():\n global gamepad, image, image_ball, clock, image_goalsign, bgm_sound, shoot_sound, crowd_sound, start_sound, whistle_sound, image_map\n\n pygame.init()\n gamepad = pygame.display.set_mode((pad_width, pad_height))\n image = list()\n pygame.display.set_caption('PyFootball')\n bgm_sound = pygame.mixer.Sound('bgm.wav')\n shoot_sound = pygame.mixer.Sound('shoot.wav')\n crowd_sound = pygame.mixer.Sound('crowd.wav')\n start_sound = pygame.mixer.Sound('start.wav')\n whistle_sound = pygame.mixer.Sound('whistle.wav')\n\n for i in range(6):\n image.append(pygame.image.load('image%d.png' % (i + 1)))\n image_ball = pygame.image.load('image_ball.png')\n image_goalsign = pygame.image.load('image_goalsign.png')\n image_map = pygame.image.load('FutsalMap.png')\n clock = pygame.time.Clock()\n intro_screen()\n\ninitGame()\n", "sub_path": "6player+ball_shortened.py", "file_name": "6player+ball_shortened.py", "file_ext": "py", "file_size_in_byte": 11124, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "random.randrange", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 106, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.mixer.music.fadeout", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 114, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 156, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 156, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 185, "usage_type": "call"}, {"api_name": "pygame.display", 
"line_number": 185, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 189, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 202, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 241, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 290, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 290, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 294, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 294, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 296, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 296, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 297, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 301, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 305, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 313, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 314, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 316, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 316, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 317, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 317, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 318, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 318, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 319, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 319, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 320, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 320, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 321, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 321, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 324, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 324, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 325, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 325, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 326, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 326, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 327, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 327, "usage_type": "attribute"}, 
{"api_name": "pygame.time.Clock", "line_number": 328, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 328, "usage_type": "attribute"}]} +{"seq_id": "406405208", "text": "# -*- coding: utf-8 -*-\n\n# Copyright 2013 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport fuelmenu.common.urwidwrapper as widget\nfrom fuelmenu.settings import Settings\nimport logging\nimport netifaces\nimport re\nimport socket\nimport struct\nimport subprocess\nimport urwid\nimport urwid.raw_display\nimport urwid.web_display\nlog = logging.getLogger('fuelmenu.modulehelper')\nblank = urwid.Divider()\n\n\nclass ModuleHelper(object):\n\n @classmethod\n def load(cls, modobj):\n #Read in yaml\n defaultsettings = Settings().read(modobj.parent.defaultsettingsfile)\n oldsettings = defaultsettings.copy()\n oldsettings.update(Settings().read(modobj.parent.settingsfile))\n for setting in modobj.defaults.keys():\n if \"label\" in setting:\n continue\n elif \"/\" in setting:\n part1, part2 = setting.split(\"/\")\n modobj.defaults[setting][\"value\"] = oldsettings[part1][part2]\n else:\n modobj.defaults[setting][\"value\"] = oldsettings[setting]\n if modobj.netsettings and oldsettings[\"ADMIN_NETWORK\"][\"interface\"] \\\n in modobj.netsettings.keys():\n modobj.activeiface = oldsettings[\"ADMIN_NETWORK\"][\"interface\"]\n return oldsettings\n\n @classmethod\n def save(cls, modobj, responses):\n newsettings = dict()\n for setting in responses.keys():\n if \"/\" in setting:\n part1, part2 = setting.split(\"/\")\n if part1 not in newsettings:\n #We may not touch all settings, so copy oldsettings first\n newsettings[part1] = modobj.oldsettings[part1]\n newsettings[part1][part2] = responses[setting]\n else:\n newsettings[setting] = responses[setting]\n return newsettings\n\n @classmethod\n def cancel(self, cls, button=None):\n for index, fieldname in enumerate(cls.fields):\n if fieldname != \"blank\" and \"label\" not in fieldname:\n try:\n cls.edits[index].set_edit_text(cls.defaults[fieldname][\n 'value'])\n except AttributeError:\n log.warning(\"Field %s unable to reset text\" % fieldname)\n\n @classmethod\n def screenUI(cls, modobj, headertext, fields, defaults,\n showallbuttons=False, buttons_visible=True):\n\n log.debug(\"Preparing screen UI for %s\" % modobj.name)\n #Define text labels, text fields, and buttons first\n header_content = []\n for text in headertext:\n if isinstance(text, str):\n header_content.append(urwid.Text(text))\n else:\n header_content.append(text)\n\n edits = []\n toolbar = modobj.parent.footer\n for key in fields:\n #Example: key = hostname, label = Hostname, value = fuel-pm\n if key == \"blank\":\n edits.append(blank)\n elif defaults[key][\"value\"] == \"radio\":\n label = widget.TextLabel(defaults[key][\"label\"])\n if \"choices\" in defaults[key]:\n choices_list = defaults[key][\"choices\"]\n else:\n choices_list = [\"Yes\", \"No\"]\n choices = widget.ChoicesGroup(choices_list,\n default_value=\"Yes\",\n fn=modobj.radioSelect)\n columns = 
widget.Columns([('weight', 2, label),\n ('weight', 3, choices)])\n #Attach choices rb_group so we can use it later\n columns.rb_group = choices.rb_group\n edits.append(columns)\n elif defaults[key][\"value\"] == \"label\":\n edits.append(widget.TextLabel(defaults[key][\"label\"]))\n else:\n ispassword = \"PASSWORD\" in key.upper()\n caption = defaults[key][\"label\"]\n default = defaults[key][\"value\"]\n tooltip = defaults[key][\"tooltip\"]\n edits.append(\n widget.TextField(key, caption, 23, default, tooltip,\n toolbar, ispassword=ispassword))\n\n listbox_content = []\n listbox_content.extend(header_content)\n listbox_content.append(blank)\n listbox_content.extend(edits)\n listbox_content.append(blank)\n\n #Wrap buttons into Columns so it doesn't expand and look ugly\n if buttons_visible:\n #Button to check\n button_check = widget.Button(\"Check\", modobj.check)\n #Button to revert to previously saved settings\n button_cancel = widget.Button(\"Cancel\", modobj.cancel)\n #Button to apply (and check again)\n button_apply = widget.Button(\"Apply\", modobj.apply)\n\n if modobj.parent.globalsave and showallbuttons is False:\n check_col = widget.Columns([button_check])\n else:\n check_col = widget.Columns([button_check, button_cancel,\n button_apply, ('weight', 2, blank)])\n listbox_content.append(check_col)\n\n #Add everything into a ListBox and return it\n listwalker = widget.TabbedListWalker(listbox_content)\n screen = urwid.ListBox(listwalker)\n modobj.edits = edits\n modobj.walker = listwalker\n modobj.listbox_content = listbox_content\n return screen\n\n @classmethod\n def getNetwork(cls, modobj):\n \"\"\"Returns addr, broadcast, netmask for each network interface.\"\"\"\n re_ifaces = re.compile(r\"lo|vir|vbox|docker|veth\")\n for iface in netifaces.interfaces():\n if re_ifaces.search(iface):\n continue\n try:\n modobj.netsettings.update({iface: netifaces.ifaddresses(iface)[\n netifaces.AF_INET][0]})\n modobj.netsettings[iface][\"onboot\"] = \"Yes\"\n except (TypeError, KeyError):\n modobj.netsettings.update({iface: {\"addr\": \"\", \"netmask\": \"\",\n \"onboot\": \"no\"}})\n modobj.netsettings[iface]['mac'] = netifaces.ifaddresses(iface)[\n netifaces.AF_LINK][0]['addr']\n\n #Set link state\n try:\n with open(\"/sys/class/net/%s/operstate\" % iface) as f:\n content = f.readlines()\n modobj.netsettings[iface][\"link\"] = content[0].strip()\n except IOError:\n log.warning(\"Unable to read operstate file for %s\" % iface)\n modobj.netsettings[iface][\"link\"] = \"unknown\"\n #Change unknown link state to up if interface has an IP\n if modobj.netsettings[iface][\"link\"] == \"unknown\":\n if modobj.netsettings[iface][\"addr\"] != \"\":\n modobj.netsettings[iface][\"link\"] = \"up\"\n\n #Read bootproto from /etc/sysconfig/network-scripts/ifcfg-DEV\n modobj.netsettings[iface]['bootproto'] = \"none\"\n try:\n with open(\"/etc/sysconfig/network-scripts/ifcfg-%s\" % iface)\\\n as fh:\n for line in fh:\n if re.match(\"^BOOTPROTO=\", line):\n modobj.netsettings[iface]['bootproto'] = \\\n line.split('=')[1].strip()\n break\n except Exception:\n #Check for dhclient process running for this interface\n if modobj.getDHCP(iface):\n modobj.netsettings[iface]['bootproto'] = \"dhcp\"\n else:\n modobj.netsettings[iface]['bootproto'] = \"none\"\n modobj.gateway = modobj.get_default_gateway_linux()\n\n @classmethod\n def getDHCP(cls, iface):\n \"\"\"Returns True if the interface has a dhclient process running.\"\"\"\n noout = open('/dev/null', 'w')\n dhclient_running = subprocess.call([\"pgrep\", \"-f\", 
\"dhclient.*%s\" %\n (iface)], stdout=noout,\n stderr=noout)\n return (dhclient_running == 0)\n\n @classmethod\n def get_default_gateway_linux(cls):\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n return socket.inet_ntoa(struct.pack(\" 1800:\n time_unit = 'days since 1801-01-01 00:00:00'\n elif y0 >= 0:\n time_unit = ('days since {:04d}-01-01 '\n '00:00:00'.format(time[0].year))\n else:\n raise InvalidParamsError('Time format not supported')\n\n with ncDataset(fpath, 'w', format='NETCDF4') as nc:\n nc.ref_hgt = ref_pix_hgt\n nc.ref_pix_lon = ref_pix_lon\n nc.ref_pix_lat = ref_pix_lat\n nc.ref_pix_dis = haversine(gdir.cenlon, gdir.cenlat,\n ref_pix_lon, ref_pix_lat)\n nc.climate_source = source\n if time[0].month == 1:\n nc.hydro_yr_0 = y0\n else:\n nc.hydro_yr_0 = y0 + 1\n nc.hydro_yr_1 = y1\n\n nc.createDimension('time', None)\n\n nc.author = 'OGGM'\n nc.author_info = 'Open Global Glacier Model'\n\n timev = nc.createVariable('time', 'i4', ('time',))\n\n tatts = {'units': time_unit}\n if calendar is None:\n calendar = 'standard'\n\n tatts['calendar'] = calendar\n try:\n numdate = netCDF4.date2num([t for t in time], time_unit,\n calendar=calendar)\n except TypeError:\n # numpy's broken datetime only works for us precision\n time = time.astype('M8[us]').astype(datetime.datetime)\n numdate = netCDF4.date2num(time, time_unit, calendar=calendar)\n\n timev.setncatts(tatts)\n timev[:] = numdate\n\n v = nc.createVariable('prcp', 'f4', ('time',), zlib=zlib)\n v.units = 'kg m-2'\n # this could be made more beautriful\n # just rough estimate\n if (len(prcp) > (nc.hydro_yr_1 - nc.hydro_yr_0 + 1) * 28 * 12 and\n temporal_resol == 'daily'):\n if source == 'ERA5_daily':\n v.long_name = (\"total daily precipitation amount, \"\n \"assumed same for each day of month\")\n elif source == 'WFDE5_daily_cru':\n v.long_name = (\"total daily precipitation amount\"\n \"sum of snowfall and rainfall\")\n elif (len(prcp) == (nc.hydro_yr_1 - nc.hydro_yr_0 + 1) * 12\n and temporal_resol == 'monthly'):\n v.long_name = 'total monthly precipitation amount'\n else:\n # v.long_name = 'total monthly precipitation amount'\n raise InvalidParamsError('there is a conflict in the'\n 'prcp timeseries, '\n 'please check temporal_resol')\n # warnings.warn(\"there might be a conflict in the prcp timeseries,\"\n # \"please check!\")\n\n v[:] = prcp\n\n v = nc.createVariable('temp', 'f4', ('time',), zlib=zlib)\n v.units = 'degC'\n if ((source == 'ERA5_daily' or source == 'WFDE5_daily_cru') and\n len(temp) > (y1 - y0) * 28 * 12 and temporal_resol == 'daily'):\n v.long_name = '2m daily temperature at height ref_hgt'\n elif source == 'ERA5_daily' and len(temp) <= (y1 - y0) * 30 * 12:\n raise InvalidParamsError('if the climate dataset (here source)'\n 'is ERA5_daily, temperatures should be in'\n 'daily resolution, please check or set'\n 'set source to another climate dataset')\n elif (source == 'WFDE5_daily_cru' and temporal_resol == 'monthly' and\n len(temp) > (y1 - y0) * 28 * 12):\n raise InvalidParamsError('something wrong in the implementation')\n else:\n v.long_name = '2m monthly temperature at height ref_hgt'\n\n v[:] = temp\n\n if gradient is not None:\n v = nc.createVariable('gradient', 'f4', ('time',), zlib=zlib)\n v.units = 'degC m-1'\n v.long_name = ('temperature gradient from local regression or'\n 'lapserates')\n v[:] = gradient\n\n if temp_std is not 
None:\n v = nc.createVariable('temp_std', 'f4', ('time',), zlib=zlib)\n v.units = 'degC'\n v.long_name = 'standard deviation of daily temperatures'\n v[:] = temp_std\n\n\n\n@entity_task(log, writes=['climate_historical_daily'])\ndef process_wfde5_data(gdir, y0=None, y1=None, temporal_resol='daily',\n output_filesuffix='_daily_WFDE5_CRU',\n cluster = True,\n climate_path='/home/lilianschuster/Schreibtisch/PhD/WP0_bayesian/WPx_WFDE5/'):\n \"\"\" TODO: let it work on the cluster first by giving there the right path...\n\n Processes and writes the WFDE5 daily baseline climate data for a glacier.\n into climate_historical_daily.nc\n\n Extracts the nearest timeseries and writes everything to a NetCDF file.\n This uses only the WFDE5 daily temperatures. The temperature lapse\n rate are used from ERA5dr.\n\n TODO: see _verified_download_helper no known hash for\n wfde5_daily_t2m_1979-2018_flat.nc and wfde5_glacier_invariant_flat\n ----------\n y0 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n y1 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n temporal_resol : str\n uses either daily (default) or monthly data\n output_filesuffix : str\n this add a suffix to the output file (useful to avoid overwriting\n previous experiments)\n cluster : bool\n default is False, if this is run on the cluster, set it to True,\n because we do not need to download the files\n\n \"\"\"\n\n # wfde5_daily for temperature and precipitation\n dataset = 'WFDE5_daily_cru'\n # but need temperature lapse rates from ERA5\n dataset_othervars = 'ERA5dr'\n\n # get the central longitude/latitudes of the glacier\n lon = gdir.cenlon + 360 if gdir.cenlon < 0 else gdir.cenlon\n lat = gdir.cenlat\n\n # cluster_path = '/home/www/oggm/climate/'\n # cluster_path = '/home/users/lschuster/'\n if cluster:\n path_tmp = climate_path + BASENAMES[dataset]['tmp']\n path_prcp = climate_path + BASENAMES[dataset]['prcp']\n path_inv = climate_path + BASENAMES[dataset]['inv']\n\n else:\n raise InvalidParamsError('not yet implemented...')\n path_tmp = get_ecmwf_file(dataset, 'tmp')\n path_prcp = get_ecmwf_file(dataset, 'pre')\n path_inv = get_ecmwf_file(dataset, 'inv')\n\n\n\n # Use xarray to read the data\n # would go faster with netCDF -.-\n # first temperature dataset\n with xr.open_dataset(path_tmp) as ds:\n assert ds.longitude.min() >= 0\n\n # set temporal subset for the ts data (hydro years)\n if gdir.hemisphere == 'nh':\n sm = cfg.PARAMS['hydro_month_nh']\n elif gdir.hemisphere == 'sh':\n sm = cfg.PARAMS['hydro_month_sh']\n\n em = sm - 1 if (sm > 1) else 12\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n\n if y1 > 2018 or y0 < 1979:\n text = 'The climate files only go from 1979--2018,\\\n choose another y0 and y1'\n raise InvalidParamsError(text)\n # if default settings: this is the last day in March or September\n time_f = '{}-{:02d}'.format(y1, em)\n end_day = int(ds.sel(time=time_f).time.dt.daysinmonth[-1].values)\n\n # this was tested also for hydro_month = 1\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-{}'.format(y1, em, end_day)))\n\n try:\n # computing all the distances and choose the nearest gridpoint\n c = (ds.longitude - lon)**2 + (ds.latitude - 
lat)**2\n ds = ds.isel(points=c.argmin())\n # I turned this around\n except ValueError:\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n # normally if I do the flattening, this here should not occur\n\n # if we want to use monthly mean tempeatures of wfde5 and\n # standard deviation of daily temperature:\n if temporal_resol == 'monthly':\n Tair_std = ds.resample(time='MS').std().Tair\n temp_std = Tair_std.data\n ds = ds.resample(time='MS').mean()\n ds['longitude'] = ds.longitude.isel(time=0)\n ds['latitude'] = ds.latitude.isel(time=0)\n elif temporal_resol == 'daily':\n temp_std = None\n else:\n raise InvalidParamsError('temporal_resol can only be monthly'\n 'or daily!')\n\n\n # temperature should be in degree Celsius for the glacier climate files\n temp = ds['Tair'].data - 273.15\n time = ds.time.data\n\n ref_lon = float(ds['longitude'])\n ref_lat = float(ds['latitude'])\n\n ref_lon = ref_lon - 360 if ref_lon > 180 else ref_lon\n\n # precipitation: similar ar temperature\n with xr.open_dataset(path_prcp) as ds:\n assert ds.longitude.min() >= 0\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n # Attention here we take the same y0 and y1 as given from the\n # daily tmp dataset (goes till end of 2018)\n\n # attention if daily data, need endday!!!\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-{}'.format(y1, em, end_day)))\n try:\n # wfde5 prcp is also flattened\n c = (ds.longitude - lon)**2 + (ds.latitude - lat)**2\n ds = ds.isel(points=c.argmin())\n except ValueError:\n # this should not occur\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n\n # if we want to use monthly summed up wfde5 precipitation:\n if temporal_resol == 'monthly':\n ds = ds.resample(time='MS').sum()\n elif temporal_resol == 'daily':\n pass\n # the prcp data of wfde5 is in kg m-2 day-1 ~ mm/day\n # or in kg m-2 month-1 ~ mm/month\n prcp = ds['tp'].data # * 1000\n # just assume that precipitation is every day the same:\n # prcp in daily reso prcp = np.repeat(prcp, ds['time.daysinmonth'])\n # Attention the unit is now prcp per day\n # (not per month as in OGGM default:\n # prcp = ds['tp'].data * 1000 * ds['time.daysinmonth']\n\n # wfde5 invariant file\n with xr.open_dataset(path_inv) as ds:\n assert ds.longitude.min() >= 0\n ds = ds.isel(time=0)\n try:\n # Flattened wfde5_inv (only possibility at the moment)\n c = (ds.longitude - lon)**2 + (ds.latitude - lat)**2\n ds = ds.isel(points=c.argmin())\n except ValueError:\n # this should not occur\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n\n # wfde5 inv ASurf/hgt is already in hgt coordinates\n # G = cfg.G # 9.80665\n hgt = ds['ASurf'].data # / G\n\n\n # here we need to use the ERA5dr data ...\n # there are no lapse rates from wfde5 !!!\n path_lapserates = get_ecmwf_file(dataset_othervars, 'lapserates')\n with xr.open_dataset(path_lapserates) as ds:\n assert ds.longitude.min() >= 0\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n # Attention here we take the same y0 and y1 as given from the\n # daily tmp dataset (goes till end of 2018)\n\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-01'.format(y1, em)))\n\n # no flattening done for the ERA5dr gradient dataset\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n\n # get the monthly gradient values\n gradient = ds['lapserate'].data\n if temporal_resol == 'monthly':\n pass\n elif temporal_resol == 'daily':\n # gradient 
needs to be restructured to have values for each day\n # when wfde5_daily is applied\n gradient = np.repeat(gradient, ds['time.daysinmonth'])\n # assume same gradient for each day\n\n if temporal_resol == 'monthly':\n if output_filesuffix == '_daily':\n output_filesuffix = ''\n dataset = 'WFDE5_monthly_cru'\n elif temporal_resol == 'daily' and output_filesuffix == '':\n output_filesuffix = '_daily'\n # OK, ready to write\n write_climate_file(gdir, time, prcp, temp, hgt, ref_lon, ref_lat,\n filesuffix=output_filesuffix,\n temporal_resol=temporal_resol,\n gradient=gradient,\n temp_std=temp_std,\n source=dataset,\n file_name='climate_historical')\n # This is now a new function, maybe it would better to make a general\n # process_daily_data function where ERA5_daily and WFDE5_daily \n # but is used, so far, only for ERA5_daily as source dataset ..\n\n\n@entity_task(log, writes=['climate_historical_daily'])\ndef process_era5_daily_data(gdir, y0=None, y1=None, output_filesuffix='_daily',\n cluster=False):\n \"\"\"Processes and writes the era5 daily baseline climate data for a glacier.\n into climate_historical_daily.nc\n\n Extracts the nearest timeseries and writes everything to a NetCDF file.\n This uses only the ERA5 daily temperatures. The precipitation, lapse\n rate and standard deviations are used from ERA5dr.\n\n TODO: see _verified_download_helper no known hash for\n era5_daily_t2m_1979-2018_flat.nc and era5_glacier_invariant_flat\n ----------\n y0 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n y1 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n output_filesuffix : str\n this add a suffix to the output file (useful to avoid overwriting\n previous experiments)\n cluster : bool\n default is False, if this is run on the cluster, set it to True,\n because we do not need to download the files\n\n \"\"\"\n\n # era5daily only for temperature\n dataset = 'ERA5_daily'\n # for the other variables use the data of ERA5dr\n dataset_othervars = 'ERA5dr'\n\n # get the central longitude/latidudes of the glacier\n lon = gdir.cenlon + 360 if gdir.cenlon < 0 else gdir.cenlon\n lat = gdir.cenlat\n\n cluster_path = '/home/www/oggm/climate/'\n\n if cluster:\n path = cluster_path + BASENAMES[dataset]['tmp']\n else:\n path = get_ecmwf_file(dataset, 'tmp')\n\n # Use xarray to read the data\n # would go faster with netCDF -.-\n with xr.open_dataset(path) as ds:\n assert ds.longitude.min() >= 0\n\n # set temporal subset for the ts data (hydro years)\n if gdir.hemisphere == 'nh':\n sm = cfg.PARAMS['hydro_month_nh']\n elif gdir.hemisphere == 'sh':\n sm = cfg.PARAMS['hydro_month_sh']\n\n em = sm - 1 if (sm > 1) else 12\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n\n if y1 > 2018 or y0 < 1979:\n text = 'The climate files only go from 1979--2018,\\\n choose another y0 and y1'\n raise InvalidParamsError(text)\n # if default settings: this is the last day in March or September\n time_f = '{}-{:02d}'.format(y1, em)\n end_day = int(ds.sel(time=time_f).time.dt.daysinmonth[-1].values)\n\n # this was tested also for hydro_month = 1\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-{}'.format(y1, em, end_day)))\n\n try:\n # computing all the 
distances and choose the nearest gridpoint\n c = (ds.longitude - lon)**2 + (ds.latitude - lat)**2\n ds = ds.isel(points=c.argmin())\n # I turned this around\n except ValueError:\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n # normally if I do the flattening, this here should not occur\n\n # temperature should be in degree Celsius for the glacier climate files\n temp = ds['t2m'].data - 273.15\n time = ds.time.data\n\n ref_lon = float(ds['longitude'])\n ref_lat = float(ds['latitude'])\n\n ref_lon = ref_lon - 360 if ref_lon > 180 else ref_lon\n\n # pre should be done as in ERA5dr datasets\n with xr.open_dataset(get_ecmwf_file(dataset_othervars, 'pre')) as ds:\n assert ds.longitude.min() >= 0\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n # Attention here we take the same y0 and y1 as given from the\n # daily tmp dataset (goes till end of 2018)\n\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-01'.format(y1, em)))\n try:\n # prcp is not flattened, so this here should work normally\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n except ValueError:\n # if Flattened ERA5_precipitation?\n c = (ds.longitude - lon)**2 + (ds.latitude - lat)**2\n ds = ds.isel(points=c.argmin())\n\n # the prcp dataset needs to be restructured to have values for each day\n prcp = ds['tp'].data * 1000\n # just assume that precipitation is every day the same:\n prcp = np.repeat(prcp, ds['time.daysinmonth'])\n # Attention the unit is now prcp per day\n # (not per month as in OGGM default:\n # prcp = ds['tp'].data * 1000 * ds['time.daysinmonth']\n\n if cluster:\n path_inv = cluster_path + BASENAMES[dataset]['inv']\n else:\n path_inv = get_ecmwf_file(dataset, 'inv')\n with xr.open_dataset(path_inv) as ds:\n assert ds.longitude.min() >= 0\n ds = ds.isel(time=0)\n try:\n # Flattened ERA5_invariant (only possibility at the moment)\n c = (ds.longitude - lon)**2 + (ds.latitude - lat)**2\n ds = ds.isel(points=c.argmin())\n except ValueError:\n # this should not occur\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n\n G = cfg.G # 9.80665\n hgt = ds['z'].data / G\n\n gradient = None\n temp_std = None\n path_lapserates = get_ecmwf_file(dataset_othervars, 'lapserates')\n with xr.open_dataset(path_lapserates) as ds:\n assert ds.longitude.min() >= 0\n\n yrs = ds['time.year'].data\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n # Attention here we take the same y0 and y1 as given from the\n # daily tmp dataset (goes till end of 2018)\n\n ds = ds.sel(time=slice('{}-{:02d}-01'.format(y0, sm),\n '{}-{:02d}-01'.format(y1, em)))\n\n # no flattening done for the ERA5dr gradient dataset\n ds = ds.sel(longitude=lon, latitude=lat, method='nearest')\n\n # get the monthly gradient values\n gradient = ds['lapserate'].data\n\n # gradient needs to be restructured to have values for each day\n gradient = np.repeat(gradient, ds['time.daysinmonth'])\n # assume same gradient for each day\n\n # OK, ready to write\n write_climate_file(gdir, time, prcp, temp, hgt, ref_lon, ref_lat,\n filesuffix=output_filesuffix,\n temporal_resol='daily',\n gradient=gradient,\n temp_std=temp_std,\n source=dataset,\n file_name='climate_historical')\n # This is now a new function, which could also work for other climates\n # but is used, so far, only for ERA5_daily as source dataset ..\n\n\n# TODO:\n# - name: TIModel? 
+ DDFModel?\nclass TIModel(MassBalanceModel):\n \"\"\"Different mass balance modules compatible to OGGM with one flowline\n\n so far this is only tested for the Huss flowlines\n \"\"\"\n\n def __init__(self, gdir, melt_f, residual=0,\n mb_type='mb_daily', N=100, loop=False,\n grad_type='cte', filename='climate_historical',\n repeat=False, ys=None, ye=None,\n t_solid=0, t_liq=2, t_melt=0, prcp_fac=2.5,\n default_grad=-0.0065,\n temp_local_gradient_bounds=[-0.009, -0.003],\n # check_climate=True,\n SEC_IN_YEAR=SEC_IN_YEAR,\n SEC_IN_MONTH=SEC_IN_MONTH,\n SEC_IN_DAY=SEC_IN_DAY,\n baseline_climate=None,\n ):\n \"\"\" Initialize.\n Parameters\n ----------\n gdir : GlacierDirectory\n the glacier directory\n melt_f : float\n melt temperature sensitivity factor per month (kg /m² /mth /K),\n need to be prescribed, e.g. such that\n |mean(MODEL_MB)-mean(REF_MB)|--> 0\n residual : float, optional\n default is to use a residual of zero [mm we yr-1]\n Note that this residual is *substracted* from the computed MB.\n Indeed: residual = MODEL_MB - REFERENCE_MB.\n ToDO: maybe change the sign?,\n opposite to OGGM \"MB terms + residual\"\n mb_type: str\n three types: 'mb_daily' (default: use temp_std and N percentiles),\n 'mb_monthly' (same as default OGGM mass balance),\n 'mb_real_daily' (use daily temperature values).\n OGGM \"MB terms + residual\"\n the mb_type only work if the baseline_climate of gdir is right\n N : int\n number of percentiles used to generate gaussian-like daily\n temperatures from daily std and mean monthly temp\n loop : bool\n the way how the matrix multiplication is done,\n using np.matmul or a loop(default: False)\n only applied if mb_type is 'mb_daily'\n which one is faster?\n grad_type : str\n three types of applying the temperature gradient:\n 'cte' (default, constant lapse rate, set to default_grad,\n same as in default OGGM)\n 'var_an_cycle' (varies spatially and over annual cycle,\n but constant over the years)\n 'var' (varies spatially & temporally as in the climate files)\n filename : str, optional\n set to a different BASENAME if you want to use alternative climate\n data, default is climate_historical\n input_filesuffix : str,\n the file suffix of the input climate file, default is '',\n if ERA5_daily with daily temperatures, it is set to _daily\n repeat : bool\n Whether the climate period given by [ys, ye] should be repeated\n indefinitely in a circular way\n ys : int\n The start of the climate period where the MB model is valid\n (default: the period with available data)\n ye : int\n The end of the climate period where the MB model is valid\n (default: the period with available data)\n t_solid : float\n temperature threshold for solid precipitation\n (degree Celsius, default 0)\n t_liq: float\n temperature threshold for liquid precipitation\n (degree Celsius, default 2)\n t_melt : float\n temperature threshold where snow/ice melts\n (degree Celsius, default 0)\n default_grad : float,\n constant lapse rate (temperature gradient, default: -0.0065 m/K)\n if grad_type != cte, then this value is not used\n but instead the changing lapse rate from the climate datasets\n temp_local_gradient_bounds : [float, float],\n if grad_type != cte and the lapse rate does not lie in this range,\n set it instead to these minimum, maximum gradients\n (default: [-0.009, -0.003] m/K)\n SEC_IN_YEAR: float\n seconds in a year (default: 31536000s),\n maybe this could be changed\n SEC_IN_MONTH: float\n seconds in a month (default: 2628000s),\n maybe this could be changed as not each\n month has the 
same amount of seconds,\n in February can be a difference of 8%\n\n Attributes\n ----------\n temp_bias : float, default 0\n Add a temperature bias to the time series\n prcp_fac : float, >0\n multiplicative precipitation correction factor (default 2.5)\n \"\"\"\n # melt_f is only initiated here, and not used in __init__.py\n # so it does not matter if it is changed\n self.melt_f = melt_f\n if self.melt_f != None and self.melt_f <= 0:\n raise InvalidParamsError('melt_f has to be above zero!')\n # but there is a problem with prcp_fac,\n # as self.prcp is produced by changing prcp_fac\n # so there is no self.prcp_fac here\n # and we need to update the prcp via prcp_fac by a property\n if prcp_fac <= 0:\n raise InvalidParamsError('prcp_fac has to be above zero!')\n # to allow prcp_fac to be changed after instantiation\n # prescribe the prcp_fac as it is instantiated\n self._prcp_fac = prcp_fac\n # same for temp bias\n self._temp_bias = 0.\n\n self.residual = residual\n\n # Parameters (from cfg.PARAMS in OGGM default)\n self.t_solid = t_solid\n self.t_liq = t_liq\n self.t_melt = t_melt\n self.N = N\n self.mb_type = mb_type\n self.loop = loop\n self.grad_type = grad_type\n # default rho is 900 kg/m3\n self.rho = cfg.PARAMS['ice_density']\n\n # Public attrs\n self.hemisphere = gdir.hemisphere\n self.repeat = repeat\n\n self.SEC_IN_YEAR = SEC_IN_YEAR\n self.SEC_IN_MONTH = SEC_IN_MONTH\n self.SEC_IN_DAY = SEC_IN_DAY\n # what is this???\n self.valid_bounds = [-1e4, 2e4] # in m\n\n\n # check if the right climate is used for the right mb_type\n # these checks might be changed if there are more climate datasets\n # available!!!\n # only have daily temperatures for 'ERA5_daily'\n if baseline_climate == None:\n baseline_climate = gdir.get_climate_info()['baseline_climate_source']\n\n if mb_type != 'mb_real_daily':\n # cfg.PARAMS['baseline_climate'] = 'ERA5dr'\n input_filesuffix = '_monthly_{}'.format(baseline_climate)\n else:\n # this is just the climate \"type\"\n # cfg.PARAMS['baseline_climate'] = 'ERA5_daily'\n input_filesuffix = '_daily_{}'.format(baseline_climate)\n\n self._input_filesuffix = input_filesuffix\n #if (self.mb_type == 'mb_real_daily' and\n # (baseline_climate != 'ERA5dr' and\n # baseline_climate != 'WFDE5_CRU')):\n # text = ('wrong climate for mb_real_daily, need to do e.g. '\n # 'process_era5_daily_data(gd) to produce daily_ERA5dr'\n # 'or process_wfde5_data(gd) for daily_WFDE5_CRU')\n # raise InvalidParamsError(text)\n # mb_monthly does not work when daily temperatures are used\n if self.mb_type == 'mb_monthly' and input_filesuffix == 'daily_ERA5dr':\n text = ('wrong climate for mb_monthly, need to do e.g.'\n 'oggm.shop.ecmwf.process_ecmwf_data(gd, dataset=\"ERA5dr\")')\n raise InvalidParamsError(text)\n # mb_daily needs temp_std\n if self.mb_type == 'mb_daily' and input_filesuffix == 'daily_ERA5dr':\n text = 'wrong climate for mb_daily, need to do e.g. 
\\\n oggm.shop.ecmwf.process_ecmwf_data(gd, dataset = \"ERA5dr\")'\n raise InvalidParamsError(text)\n\n # Read climate file\n fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)\n\n # used xarray instead of netCDF4, is this slower?\n with xr.open_dataset(fpath) as xr_nc:\n if self.mb_type == 'mb_real_daily' or self.mb_type == 'mb_monthly':\n # even if there is temp_std inside the dataset, we won't use\n # it for these mb_types\n self.temp_std = np.NaN\n else:\n try:\n self.temp_std = xr_nc['temp_std'].values.astype(np.float64)\n except KeyError:\n text = ('The applied climate has no temp std, do e.g.'\n 'oggm.shop.ecmwf.process_ecmwf_data'\n '(gd, dataset=\"ERA5dr\")')\n\n raise InvalidParamsError(text)\n\n # goal is to get self.years/self.months in hydro_years\n if self.mb_type != 'mb_real_daily':\n time = xr_nc.time\n ny, r = divmod(len(time), 12)\n if r != 0:\n raise ValueError('Climate data should be N full years')\n # This is where we switch to hydro float year format\n # Last year gives the tone of the hydro year\n self.years = np.repeat(np.arange(xr_nc.time[-1].dt.year-ny+1,\n xr_nc.time[-1].dt.year+1), 12)\n self.months = np.tile(np.arange(1, 13), ny)\n\n elif self.mb_type == 'mb_real_daily':\n # use pandas to convert month/year to hydro_years\n # this has to be done differently than above because not\n # every month, year has the same amount of days\n pd_test = pd.DataFrame(xr_nc.time.to_series().dt.year.values,\n columns=['year'])\n pd_test.index = xr_nc.time.to_series().values\n pd_test['month'] = xr_nc.time.to_series().dt.month.values\n pd_test['hydro_year'] = np.NaN\n # get the month where the hydrological month starts\n # as chosen from the gdir climate file\n # default 10 for 'nh', 4 for 'sh'\n hydro_month_start = int(xr_nc.time[0].dt.month.values)\n if hydro_month_start == 1:\n # hydro_year corresponds to normal year\n pd_test.loc[pd_test.index.month >= hydro_month_start,\n 'hydro_year'] = pd_test['year']\n else:\n pd_test.loc[pd_test.index.month < hydro_month_start,\n 'hydro_year'] = pd_test['year']\n # otherwise, those days with a month>=hydro_month_start\n # belong to the next hydro_year\n pd_test.loc[pd_test.index.month >= hydro_month_start,\n 'hydro_year'] = pd_test['year']+1\n # month_hydro is 1 if it is hydro_month_start\n month_hydro = pd_test['month'].values+(12-hydro_month_start+1)\n month_hydro[month_hydro > 12] += -12\n pd_test['hydro_month'] = month_hydro\n pd_test = pd_test.astype('int')\n self.years = pd_test['hydro_year'].values\n ny = self.years[-1] - self.years[0]+1\n self.months = pd_test['hydro_month'].values\n # Read timeseries and correct it\n self.temp = xr_nc['temp'].values.astype(np.float64) + self._temp_bias\n # this is prcp computed by instantiation\n # this changes if prcp_fac is updated (see @property)\n self.prcp = xr_nc['prcp'].values.astype(np.float64) * self._prcp_fac\n\n\n # lapse rate (temperature gradient)\n if self.grad_type == 'var' or self.grad_type == 'var_an_cycle':\n try:\n grad = xr_nc['gradient'].values.astype(np.float64)\n # Security for stuff that can happen with local gradients\n g_minmax = temp_local_gradient_bounds\n\n # if gradient is not a number, or positive/negative\n # infinity, use the default gradient\n grad = np.where(~np.isfinite(grad), default_grad, grad)\n\n # if outside boundaries of default -0.009 and above\n # -0.003 -> use the boundaries instead\n grad = clip_array(grad, g_minmax[0], g_minmax[1])\n\n if self.grad_type == 'var_an_cycle':\n # if we want constant lapse rates over the years\n # that 
change over the annual cycle, but not over time\n if self.mb_type == 'mb_real_daily':\n grad_gb = xr_nc['gradient'].groupby('time.month')\n grad = grad_gb.mean().values\n g_minmax = temp_local_gradient_bounds\n\n # if gradient is not a number, or positive/negative\n # infinity, use the default gradient\n grad = np.where(~np.isfinite(grad), default_grad,\n grad)\n\n # if outside boundaries of default -0.009 and above\n # -0.003 -> use the boundaries instead\n grad = clip_array(grad, g_minmax[0], g_minmax[1])\n\n stack_grad = grad.reshape(-1, 12)\n grad = np.tile(stack_grad.mean(axis=0), ny)\n reps_day1 = xr_nc.time[xr_nc.time.dt.day == 1]\n reps = reps_day1.dt.daysinmonth\n grad = np.repeat(grad, reps)\n\n else:\n stack_grad = grad.reshape(-1, 12)\n grad = np.tile(stack_grad.mean(axis=0), ny)\n except KeyError:\n text = ('there is no gradient available in chosen climate'\n 'file, try instead e.g. ERA5_daily or ERA5dr e.g.'\n 'oggm.shop.ecmwf.process_ecmwf_data'\n '(gd, dataset=\"ERA5dr\")')\n\n raise InvalidParamsError(text)\n\n elif self.grad_type == 'cte':\n # if grad_type is chosen cte, we use the default_grad!\n grad = self.prcp * 0 + default_grad\n else:\n raise InvalidParamsError('grad_type can be either cte,'\n 'var or var_an_cycle')\n self.grad = grad\n self.ref_hgt = xr_nc.ref_hgt # xr_nc.uncorrected_ref_hgt\n # ref_hgt\n # if climate dataset has been corrected once again\n # or non corrected reference height!\n try:\n self.uncorrected_ref_hgt = xr_nc.uncorrected_ref_hgt\n except:\n self.uncorrected_ref_hgt = xr_nc.ref_hgt\n # xr_nc.ref_hgt\n\n self.ys = self.years[0] if ys is None else ys\n self.ye = self.years[-1] if ye is None else ye\n\n self.fpath = fpath\n\n # copying Cat class idea ;-)\n # https://fabienmaussion.info/scientific_programming/week_10/01-OOP-Part-1.html\n @property\n def prcp_fac(self):\n ''' prints the _prcp_fac\n '''\n return self._prcp_fac\n\n @prcp_fac.setter\n def prcp_fac(self, new_prcp_fac):\n '''\n '''\n if new_prcp_fac <= 0:\n raise InvalidParamsError('prcp_fac has to be above zero!')\n # attention, prcp_fac should not be called here\n # otherwise there is recursion occurring forever...\n # use new_prcp_fac to not get maximum recusion depth error\n self.prcp *= new_prcp_fac / self._prcp_fac\n # update old prcp_fac in order that it can be updated\n # again ...\n self._prcp_fac = new_prcp_fac\n\n # same for temp_bias:\n @property\n def temp_bias(self):\n return self._temp_bias\n\n @temp_bias.setter\n def temp_bias(self, new_temp_bias):\n self.temp += new_temp_bias - self._temp_bias\n # update old temp_bias in order that it can be updated again ...\n self._temp_bias = new_temp_bias\n\n def historical_climate_qc_mod(self, gdir,\n # t_solid=0, t_liq=2, t_melt=0, prcp_fac=2.5,\n # default_grad=-0.0065,\n # templocal_gradient_bounds=[-0.009, -0.003],\n climate_qc_months=3,\n use_cfg_params=False):\n \"\"\"\"Check the \"quality\" of climate data and correct it if needed.\n\n Similar to historical_climate_qc from oggm.core.climate but checks\n that climate that is used in TIModels directly\n\n This forces the climate data to have at least one month of melt\n per year at the terminus of the glacier (i.e. simply shifting\n temperatures up\n when necessary), and at least one month where accumulation is possible\n at the glacier top (i.e. 
shifting the temperatures down).\n\n This has a similar effect as introducing a temperature bias\n \"\"\"\n\n # # Parameters\n # if use_cfg_params:\n # temp_s = (cfg.PARAMS['temp_all_liq'] +\n # cfg.PARAMS['temp_all_solid'])/2\n # temp_m = cfg.PARAMS['temp_melt']\n # default_grad = cfg.PARAMS['temp_default_gradient']\n # g_minmax = cfg.PARAMS['temp_local_gradient_bounds']\n # qc_months = cfg.PARAMS['climate_qc_months']\n # else:\n # temp_s = (t_liq + t_solid) / 2\n # temp_m = t_melt\n # default_grad = default_grad\n # g_minmax = temp_local_gradient_bounds\n # if qc_months == 0:\n # return\n\n # Read file\n # if cfg.PARAMS['baseline_climate'] == 'ERA5_daily':\n # filesuffix = '_daily'\n # fpath = gdir.get_filepath('climate_historical'+filesuffix)\n # igrad = None\n # with utils.ncDataset(fpath) as nc:\n # # time\n # # Read timeseries\n # itemp = nc.variables['temp'][:]\n # if 'gradient' in nc.variables:\n # igrad = nc.variables['gradient'][:]\n # # Security for stuff that can happen with local gradients\n # igrad = np.where(~np.isfinite(igrad), default_grad, igrad)\n # igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])\n # ref_hgt = nc.ref_hgt\n\n # # Default gradient?\n # if igrad is None:\n # igrad = itemp * 0 + default_grad\n\n # Parameters (from cfg.PARAMS in OGGM defaul\n if self.temp_bias != 0:\n raise InvalidParamsError('either use no temp_bias or do no quality'\n 'check corrections, as they have the '\n 'same effects!')\n fpath = self.fpath\n grad = self.grad\n # get non-corrected quality check\n ref_hgt = self.uncorrected_ref_hgt\n itemp = self.temp\n temp_m = self.t_melt\n temp_s = (self.t_liq + self.t_solid) / 2\n if ('daily' in self._input_filesuffix):\n # different amount of days per year ...\n d_m = 30\n pass\n else:\n d_m = 1\n ny = len(grad) // 12\n assert ny == len(grad) / 12\n\n # Geometry data\n fls = gdir.read_pickle('inversion_flowlines')\n heights = np.array([])\n for fl in fls:\n heights = np.append(heights, fl.surface_h)\n top_h = np.max(heights)\n bot_h = np.min(heights)\n\n # First check - there should be at least \"climate_qc_months\"\n # month of melt every year\n prev_ref_hgt = ref_hgt\n while True:\n # removed default_grad and uses instead grad!\n ts_bot = itemp + grad * (bot_h - ref_hgt)\n # reshape does not work , because of different amount of days\n # per year ...\n pd_ts = pd.DataFrame({'ts_threshold': ts_bot > temp_m,\n 'year': self.years})\n ts_bot = pd_ts.groupby('year').sum()['ts_threshold'].values\n # ts_bot = (ts_bot.reshape((ny, 12)) > temp_m).sum(axis=1)\n if np.all(ts_bot >= climate_qc_months * d_m):\n # Ok all good\n break\n # put ref hgt a bit higher so that we warm things a bit\n ref_hgt += 10\n\n # If we changed this it makes no sense to lower it down again,\n # so resume here:\n if ref_hgt != prev_ref_hgt:\n with utils.ncDataset(fpath, 'a') as nc:\n nc.ref_hgt = ref_hgt\n nc.uncorrected_ref_hgt = prev_ref_hgt\n gdir.add_to_diagnostics('ref_hgt_qc_diff',\n int(ref_hgt - prev_ref_hgt))\n # need to save the new ref_hgt\n self.ref_hgt = ref_hgt\n return\n\n # Second check - there should be at least \"climate_qc_months\"\n # month of acc every year\n while True:\n # grad instead of default_grad\n ts_top = itemp + grad * (top_h - ref_hgt)\n # reshape does not work , because of different amount of days\n # per year ...\n pd_ts = pd.DataFrame({'ts_threshold': ts_top < temp_s,\n 'year': self.years})\n ts_top = pd_ts.groupby('year').sum()['ts_threshold'].values\n # ts_top = (ts_top.reshape((ny, 12)) < temp_s).sum(axis=1)\n if np.all(ts_top >= 
climate_qc_months * d_m):\n # Ok all good\n break\n # put ref hgt a bit lower so that we cold things a bit\n ref_hgt -= 10\n\n if ref_hgt != prev_ref_hgt:\n with utils.ncDataset(fpath, 'a') as nc:\n nc.ref_hgt = ref_hgt\n nc.uncorrected_ref_hgt = prev_ref_hgt\n gdir.add_to_diagnostics('ref_hgt_qc_diff',\n int(ref_hgt - prev_ref_hgt))\n # need to save the new ref_hgt\n self.ref_hgt = ref_hgt\n return\n\n def _get_climate(self, heights, climate_type, year=None):\n \"\"\"Climate information at given heights.\n year has to be given as float hydro year from what the month is taken,\n hence year 2000 -> y=2000, m = 1, & year = 2000.09, y=2000, m=2 ...\n which corresponds to the real year 1999 an months October or November\n if hydro year starts in October\n\n Note that prcp is corrected with the precipitation factor and that\n all other model biases (temp and prcp) are applied.\n\n same as in OGGM default except that tempformelt is computed by\n self._get_tempformelt\n\n Parameters\n -------\n heights : np.array or list\n heights along flowline\n climate_type : str\n either 'monthly' or 'annual', if annual floor of year is used,\n if monthly float year is converted into month and year\n\n Returns\n -------\n (temp, tempformelt, prcp, prcpsol)\n \"\"\"\n\n y, m = floatyear_to_date(year)\n if self.repeat:\n y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)\n if y < self.ys or y > self.ye:\n raise ValueError('year {} out of the valid time bounds: '\n '[{}, {}]'.format(y, self.ys, self.ye))\n\n if self.mb_type == 'mb_real_daily' or climate_type == 'annual':\n if climate_type == 'annual':\n #if type(year) == float:\n # raise InvalidParamsError('')\n pok = np.where(self.years == year)[0]\n if len(pok) < 1:\n raise ValueError('Year {} not in record'.format(int(year)))\n else:\n pok = np.where((self.years == y) & (self.months == m))[0]\n if len(pok) < 28:\n warnings.warn('something goes wrong with amount of entries\\\n per month for mb_real_daily')\n else:\n pok = np.where((self.years == y) & (self.months == m))[0][0]\n # Read timeseries\n # (already temperature bias and precipitation factor corrected!)\n itemp = self.temp[pok]\n iprcp = self.prcp[pok]\n igrad = self.grad[pok]\n\n # For each height pixel:\n # Compute temp and tempformelt (temperature above melting threshold)\n heights = np.asarray(heights)\n npix = len(heights)\n if self.mb_type == 'mb_real_daily' or climate_type == 'annual':\n grad_temp = np.atleast_2d(igrad).repeat(npix, 0)\n if len(pok) != 12 and self.mb_type != 'mb_real_daily':\n warnings.warn('something goes wrong with amount of entries'\n 'per year')\n grad_temp *= (heights.repeat(len(pok)).reshape(grad_temp.shape) -\n self.ref_hgt)\n temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp\n\n # temp_for_melt is computed separately depending on mb_type\n temp2dformelt = self._get_tempformelt(temp2d, pok)\n\n # Compute solid precipitation from total precipitation\n prcp = np.atleast_2d(iprcp).repeat(npix, 0)\n fac = 1 - (temp2d - self.t_solid) / (self.t_liq - self.t_solid)\n prcpsol = prcp * clip_array(fac, 0, 1)\n return temp2d, temp2dformelt, prcp, prcpsol\n\n else:\n temp = np.ones(npix) * itemp + igrad * (heights - self.ref_hgt)\n\n # temp_for_melt is computed separately depending on mb_type\n tempformelt = self._get_tempformelt(temp, pok)\n prcp = np.ones(npix) * iprcp\n fac = 1 - (temp - self.t_solid) / (self.t_liq - self.t_solid)\n prcpsol = prcp * clip_array(fac, 0, 1)\n\n return temp, tempformelt, prcp, prcpsol\n\n def _get_2d_monthly_climate(self, heights, 
year=None):\n # first get the climate data\n Warning('Attention: this has not been tested enough to be sure that '\n 'it works')\n if self.mb_type == 'mb_real_daily':\n return self._get_climate(heights, 'monthly', year=year)\n else:\n raise InvalidParamsError('_get_2d_monthly_climate works only\\\n with mb_real_daily as mb_type!!!')\n\n\n\n def get_monthly_climate(self, heights, year=None):\n # first get the climate data\n Warning('Attention: this has not been tested enough to be sure that \\\n it works')\n if self.mb_type == 'mb_real_daily':\n t, tfmelt, prcp, prcpsol = self._get_climate(heights, 'monthly',\n year=year)\n return (t.mean(axis=1), tfmelt.sum(axis=1),\n prcp.sum(axis=1), prcpsol.sum(axis=1))\n else:\n return self._get_climate(heights, 'monthly', year=year)\n # if it is mb_real_daily, the data has daily resolution\n\n def get_daily_climate(self, heights, year = None):\n raise NotImplementedError('look at _get_2d_daily_climate instead')\n\n def _get_2d_annual_climate(self, heights, year):\n return self._get_climate(heights, 'annual', year=year)\n\n def _get_2d_daily_climate(self, heights, year = None):\n return self._get_climate(heights, 'annual', year=year)\n # If I also want to use this outside of the class because\n # (e.g. in climate.py), I have to change this again and remove the self...\n # and somehow there is aproblem if I put not self in\n # _get_tempformelt when it is inside the class\n\n def _get_tempformelt(self, temp, pok):\n \"\"\" Helper function to compute tempformelt to avoid code duplication\n in get_monthly_climate() and _get2d_annual_climate()\n\n If using this again outside of this class, need to remove the \"self\",\n such as for 'mb_climate_on_height' in climate.py, that has no self....\n (would need to change temp, t_melt ,temp_std, mb_type, N, loop)\n\n Input: stuff that is different for the different methods\n temp: temperature time series\n pok: indices of time series\n\n Returns\n -------\n (tempformelt)\n \"\"\"\n\n tempformelt_without_std = temp - self.t_melt\n\n # computations change only if 'mb_daily' as mb_type!\n if self.mb_type == 'mb_monthly' or self.mb_type == 'mb_real_daily':\n tempformelt = tempformelt_without_std\n elif self.mb_type == 'mb_daily':\n\n itemp_std = self.temp_std[pok]\n\n tempformelt_with_std = np.full(np.shape(tempformelt_without_std),\n np.NaN)\n # matrix with N values that are distributed around 0\n # showing how much fake 'daily' values vary from the mean\n z_scores_mean = stats.norm.ppf(np.arange(1/self.N-1/(2*self.N),\n 1, 1/self.N))\n\n z_std = np.matmul(np.atleast_2d(z_scores_mean).T,\n np.atleast_2d(itemp_std))\n\n # there are two possibilities,\n # not using the loop is most of the times faster\n if self.loop is False:\n # without the loop: but not much faster ..\n tempformelt_daily = np.atleast_3d(tempformelt_without_std).T + \\\n np.atleast_3d(z_std)\n clip_min(tempformelt_daily, 0, out=tempformelt_daily)\n tempformelt_with_std = tempformelt_daily.mean(axis=0).T\n else:\n shape_tfm = np.shape(tempformelt_without_std)\n tempformelt_with_std = np.full(shape_tfm, np.NaN)\n for h in np.arange(0, np.shape(tempformelt_without_std)[0]):\n h_tfm_daily_ = np.atleast_2d(tempformelt_without_std[h, :])\n h_tempformelt_daily = h_tfm_daily_ + z_std\n clip_min(h_tempformelt_daily, 0, out=h_tempformelt_daily)\n h_tempformelt_monthly = h_tempformelt_daily.mean(axis=0)\n tempformelt_with_std[h, :] = h_tempformelt_monthly\n tempformelt = tempformelt_with_std\n\n else:\n raise InvalidParamsError('mb_type can only be 
\"mb_monthly,\\\n mb_daily or mb_real_daily\" ')\n # replace all values below zero to zero\n clip_min(tempformelt, 0, out=tempformelt)\n\n return tempformelt\n\n # same as in OGGM default\n def get_annual_climate(self, heights, year=None):\n \"\"\"Annual climate information at given heights.\n\n Note that prcp is corrected with the precipitation factor and that\n all other model biases (temp and prcp) are applied.\n\n Returns\n -------\n (temp, tempformelt, prcp, prcpsol)\n \"\"\"\n t, tfmelt, prcp, prcpsol = self._get_2d_annual_climate(heights, year)\n return (t.mean(axis=1), tfmelt.sum(axis=1),\n prcp.sum(axis=1), prcpsol.sum(axis=1))\n\n def get_monthly_mb(self, heights, year=None, **kwargs):\n \"\"\" computes annual mass balance in kg /m² /second\n\n Attention year is here in hydro float year\n\n year has to be given as float hydro year from what the month is taken,\n hence year 2000 -> y=2000, m = 1, & year = 2000.09, y=2000, m=2 ...\n which corresponds to the real year 1999 an months October or November\n if hydro year starts in October\n \"\"\"\n # get_monthly_mb and get_annual_mb are only different\n # to OGGM default for mb_real_daily\n\n if self.mb_type == 'mb_real_daily':\n # get 2D values, dependencies on height and time (days)\n out = self._get_2d_monthly_climate(heights, year)\n _, temp2dformelt, _, prcpsol = out\n #(days per month)\n dom = 365.25/12 # len(prcpsol.T)\n # attention, I should not use the days of years as the melt_f is\n # per month ~mean days of that year 12/daysofyear\n # to have the same unit of melt_f, which is\n # the monthly temperature sensitivity (kg /m² /mth /K),\n mb_daily = prcpsol - (self.melt_f/dom) * temp2dformelt\n\n mb_month = np.sum(mb_daily, axis=1)\n # more correct than using a mean value for days in a month\n warnings.warn('get_monthly_mb has not been tested enough,'\n ' there might be a problem with SEC_IN_MONTH'\n '.., see test_monthly_glacier_massbalance()')\n\n else:\n # get 1D values for each height, no dependency on days\n _, tmelt, _, prcpsol = self.get_monthly_climate(heights, year=year)\n mb_month = prcpsol - self.melt_f * tmelt\n\n # residual is in mm w.e per year, so SEC_IN_MONTH .. 
but mb_month\n # shoud be per month!\n mb_month -= self.residual * self.SEC_IN_MONTH / self.SEC_IN_YEAR\n # this is for mb_daily otherwise it gives the wrong shape\n mb_month = mb_month.flatten()\n # instead of SEC_IN_MONTH, use instead len(prcpsol.T)==daysinmonth\n return mb_month / self.SEC_IN_MONTH / self.rho\n\n def get_annual_mb(self, heights, year=None, **kwargs):\n \"\"\" computes annual mass balance in kg /m² /second \"\"\"\n # get_monthly_mb and get_annual_mb are only different\n # to OGGM default for mb_real_daily\n\n _, temp2dformelt, _, prcpsol = self._get_2d_annual_climate(heights,\n year)\n # *12/daysofthisyear in order to have the same unit of melt_f, which\n # is the monthly temperature sensitivity (kg /m² /mth /K),\n if self.mb_type == 'mb_real_daily':\n # in this case we have the temp2dformelt for each day\n # but self.melt_f is in per month -> divide trough days/month\n # more correct than using a mean value for days in a year\n fact = 12/365.25\n # len(prcpsol.T): make it more consistent as melt_f is described\n # per month independent of which month it is ...\n else:\n fact = 1 # eventually correct here with 365.25\n mb_annual = np.sum(prcpsol - self.melt_f * temp2dformelt*fact,\n axis=1)\n return (mb_annual - self.residual) / self.SEC_IN_YEAR / self.rho\n\n def get_daily_mb(self, heights, year = None):\n #, m = None,\n #float_year=None, **kwargs):\n \"\"\"computes daily mass balance in kg/m2/second\n\n year has to be given as float hydro year from what the month is taken,\n hence year 2000 -> y=2000, m = 1, & year = 2000.09, y=2000, m=2 ...\n which corresponds to the real year 1999 an months October or November\n if hydro year starts in October\n\n \"\"\"\n\n # todo: make this more user friendly\n if type(year)==float:\n raise InvalidParamsError('here year has to be the integer year')\n else:\n pass\n #if y==None:\n # year = date_to_floatyear(y, m)\n ##elif y==None and m!=None:\n # raise InvalidParamsError('give y or only give float_year'\n # 'and no month m')\n #else:\n # year = float_year\n if self.mb_type == 'mb_real_daily':\n # get 2D values, dependencies on height and time (days)\n out = self._get_2d_daily_climate(heights, year)\n _, temp2dformelt, _, prcpsol = out\n # days of year\n doy = 365.25 #len(prcpsol.T)\n # assert doy > 360\n # to have the same unit of melt_f, which is\n # the monthly temperature sensitivity (kg /m² /mth /K),\n melt_f_daily = self.melt_f * 12/doy\n mb_daily = prcpsol - melt_f_daily * temp2dformelt\n\n # mb_month = np.sum(mb_daily, axis=1)\n # more correct than using a mean value for days in a month\n warnings.warn('get_daily_mb has not been tested enough,')\n\n # residual is in mm w.e per year, so SEC_IN_MONTH .. 
but mb_daily\n # is per day!\n mb_daily -= self.residual * self.SEC_IN_DAY / self.SEC_IN_YEAR\n # this is for mb_daily otherwise it gives the wrong shape\n # mb_daily = mb_month.flatten()\n # instead of SEC_IN_MONTH, use instead len(prcpsol.T)==daysinmonth\n return mb_daily / self.SEC_IN_DAY / self.rho\n else:\n raise InvalidParamsError('get_daily_mb works only with'\n 'mb_real_daily as mb_type!')\n\n def get_specific_daily_mb(self, heights=None, widths=None, year=None):\n \" returns specific daily mass balance in kg m-2 day \"\n if len(np.atleast_1d(year)) > 1:\n out = [self.get_specific_daily_mb(heights=heights, widths=widths,\n year=yr) for yr in year]\n return np.asarray(out)\n\n mb = self.get_daily_mb(heights, year=year)\n spec_mb = np.average(mb * self.rho * SEC_IN_DAY, weights=widths, axis=0)\n assert len(spec_mb) > 360\n return spec_mb\n\n# copy of MultipleFlowlineMassBalance that works with TIModel\nclass MultipleFlowlineMassBalance_TIModel(MassBalanceModel):\n \"\"\" TODO: adapt this: main changes are mu_star -> melt_f\n Handle mass-balance at the glacier level instead of flowline level.\n\n Convenience class doing not much more than wrapping a list of mass-balance\n models, one for each flowline.\n\n This is useful for real-case studies, where each flowline might have a\n different mu*.\n\n Attributes\n ----------\n fls : list\n list of flowline objects\n mb_models : list\n list of mass-balance objects\n \"\"\"\n\n def __init__(self, gdir, fls=None, melt_f=None, prcp_fac=None,\n mb_model_class=TIModel, use_inversion_flowlines=False,\n input_filesuffix='', bias=0,\n **kwargs):\n \"\"\"Initialize.\n\n Parameters\n ----------\n gdir : GlacierDirectory\n the glacier directory\n mu_star : float or list of floats, optional\n set to the alternative value of mu* you want to use\n (the default is to use the calibrated value). Give a list of values\n for flowline-specific mu*\n fls : list, optional\n list of flowline objects to use (defaults to 'model_flowlines',\n and if not available, to 'inversion_flowlines')\n mb_model_class : class, optional\n the mass-balance model to use (e.g. PastMassBalance,\n ConstantMassBalance...)\n use_inversion_flowlines: bool, optional\n if True 'inversion_flowlines' instead of 'model_flowlines' will be\n used.\n input_filesuffix : str\n the file suffix of the input climate file\n bias : float, optional\n set to the alternative value of the calibration bias [mm we yr-1]\n you want to use (the default is to use the calibrated value)\n Note that this bias is *substracted* from the computed MB. Indeed:\n BIAS = MODEL_MB - REFERENCE_MB.\n kwargs : kwargs to pass to mb_model_class\n \"\"\"\n\n # Read in the flowlines\n if use_inversion_flowlines:\n fls = gdir.read_pickle('inversion_flowlines')\n\n if fls is None:\n try:\n fls = gdir.read_pickle('model_flowlines')\n except FileNotFoundError:\n raise InvalidWorkflowError('Need a valid `model_flowlines` '\n 'file. 
If you explicitly want to '\n 'use `inversion_flowlines`, set '\n 'use_inversion_flowlines=True.')\n\n self.fls = fls\n _y0 = kwargs.get('y0', None)\n\n\n # Initialise the mb models\n self.flowline_mb_models = []\n for fl in self.fls:\n # Merged glaciers will need different climate files, use filesuffix\n if (fl.rgi_id is not None) and (fl.rgi_id != gdir.rgi_id):\n rgi_filesuffix = '_' + fl.rgi_id + input_filesuffix\n else:\n rgi_filesuffix = input_filesuffix\n\n # merged glaciers also have a different MB bias from calibration\n if ((bias is None) and cfg.PARAMS['use_bias_for_run'] and\n (fl.rgi_id != gdir.rgi_id)):\n df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)\n fl_bias = df['bias']\n else:\n fl_bias = bias\n\n # Constant and RandomMassBalance need y0 if not provided\n #if (issubclass(mb_model_class, RandomMassBalance) or\n # issubclass(mb_model_class, ConstantMassBalance)) and (\n # fl.rgi_id != gdir.rgi_id) and (_y0 is None):#\n\n # df = gdir.read_json('local_mustar', filesuffix='_' + fl.rgi_id)\n # kwargs['y0'] = df['t_star']\n\n if (issubclass(mb_model_class, TIModel)):\n self.flowline_mb_models.append(\n mb_model_class(gdir, melt_f, prcp_fac = prcp_fac,\n residual=fl_bias, baseline_climate=rgi_filesuffix,\n **kwargs))\n else:\n self.flowline_mb_models.append(\n mb_model_class(gdir, mu_star=fl.mu_star, bias=fl_bias,\n input_filesuffix=rgi_filesuffix, **kwargs))\n\n self.valid_bounds = self.flowline_mb_models[-1].valid_bounds\n self.hemisphere = gdir.hemisphere\n\n @property\n def temp_bias(self):\n \"\"\"Temperature bias to add to the original series.\"\"\"\n return self.flowline_mb_models[0].temp_bias\n\n @temp_bias.setter\n def temp_bias(self, value):\n \"\"\"Temperature bias to add to the original series.\"\"\"\n for mbmod in self.flowline_mb_models:\n mbmod.temp_bias = value\n\n @property\n def prcp_fac(self):\n \"\"\"Precipitation factor to apply to the original series.\"\"\"\n return self.flowline_mb_models[0].prcp_fac\n\n @prcp_fac.setter\n def prcp_fac(self, value):\n \"\"\"Precipitation factor to apply to the original series.\"\"\"\n for mbmod in self.flowline_mb_models:\n mbmod.prcp_fac = value\n\n @property\n def bias(self):\n \"\"\"Residual bias to apply to the original series.\"\"\"\n return self.flowline_mb_models[0].bias\n\n @bias.setter\n def bias(self, value):\n \"\"\"Residual bias to apply to the original series.\"\"\"\n for mbmod in self.flowline_mb_models:\n mbmod.bias = value\n\n def get_monthly_mb(self, heights, year=None, fl_id=None, **kwargs):\n\n if fl_id is None:\n raise ValueError('`fl_id` is required for '\n 'MultipleFlowlineMassBalance!')\n\n return self.flowline_mb_models[fl_id].get_monthly_mb(heights,\n year=year)\n\n def get_annual_mb(self, heights, year=None, fl_id=None, **kwargs):\n\n if fl_id is None:\n raise ValueError('`fl_id` is required for '\n 'MultipleFlowlineMassBalance!')\n\n return self.flowline_mb_models[fl_id].get_annual_mb(heights,\n year=year)\n\n def get_annual_mb_on_flowlines(self, fls=None, year=None):\n \"\"\"Get the MB on all points of the glacier at once.\n\n Parameters\n ----------\n fls: list, optional\n the list of flowlines to get the mass-balance from. 
Defaults\n to self.fls\n year: float, optional\n the time (in the \"floating year\" convention)\n Returns\n -------\n Tuple of (heights, widths, mass_balance) 1D arrays\n \"\"\"\n\n if fls is None:\n fls = self.fls\n\n heights = []\n widths = []\n mbs = []\n for i, fl in enumerate(fls):\n h = fl.surface_h\n heights = np.append(heights, h)\n widths = np.append(widths, fl.widths)\n mbs = np.append(mbs, self.get_annual_mb(h, year=year, fl_id=i))\n\n return heights, widths, mbs\n\n def get_specific_mb(self, heights=None, widths=None, fls=None,\n year=None):\n\n if heights is not None or widths is not None:\n raise ValueError('`heights` and `widths` kwargs do not work with '\n 'MultipleFlowlineMassBalance!')\n\n if fls is None:\n fls = self.fls\n\n if len(np.atleast_1d(year)) > 1:\n out = [self.get_specific_mb(fls=fls, year=yr) for yr in year]\n return np.asarray(out)\n\n mbs = []\n widths = []\n for i, (fl, mb_mod) in enumerate(zip(self.fls, self.flowline_mb_models)):\n _widths = fl.widths\n try:\n # For rect and parabola don't compute spec mb\n _widths = np.where(fl.thick > 0, _widths, 0)\n except AttributeError:\n pass\n widths = np.append(widths, _widths)\n mb = mb_mod.get_annual_mb(fl.surface_h, year=year, fls=fls, fl_id=i)\n mbs = np.append(mbs, mb * SEC_IN_YEAR * mb_mod.rho)\n\n return np.average(mbs, weights=widths)\n\n def get_ela(self, year=None, **kwargs):\n\n # ELA here is not without ambiguity.\n # We compute a mean weighted by area.\n\n if len(np.atleast_1d(year)) > 1:\n return np.asarray([self.get_ela(year=yr) for yr in year])\n\n elas = []\n areas = []\n for fl_id, (fl, mb_mod) in enumerate(zip(self.fls,\n self.flowline_mb_models)):\n elas = np.append(elas, mb_mod.get_ela(year=year, fl_id=fl_id,\n fls=self.fls))\n areas = np.append(areas, np.sum(fl.widths))\n\n return np.average(elas, weights=areas)\n\n\n@entity_task(log)\ndef fixed_geometry_mass_balance_TIModel(gdir, ys=None, ye=None, years=None,\n monthly_step=False,\n use_inversion_flowlines=True,\n climate_filename='climate_historical',\n climate_input_filesuffix='',\n ds_gcm = None,\n **kwargs):\n \"\"\"Computes the mass-balance with climate input from e.g. CRU or a GCM.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n ys : int\n start year of the model run (default: from the climate file)\n date)\n ye : int\n end year of the model run (default: from the climate file)\n years : array of ints\n override ys and ye with the years of your choice\n monthly_step : bool\n whether to store the diagnostic data at a monthly time step or not\n (default is yearly)\n use_inversion_flowlines : bool\n whether to use the inversion flowlines or the model flowlines\n climate_filename : str\n name of the climate file, e.g. 
'climate_historical' (default) or\n 'gcm_data'\n climate_input_filesuffix: str\n filesuffix for the input climate file\n **kwargs:\n added to MultipleFlowlineMassBalance_TIModel\n \"\"\"\n\n if monthly_step:\n raise NotImplementedError('monthly_step not implemented yet')\n if ds_gcm != None:\n melt_f = ds_gcm.sel(rgi_id=gdir.rgi_id).melt_f.values\n pf = ds_gcm.sel(rgi_id=gdir.rgi_id).pf.values\n\n mb = MultipleFlowlineMassBalance_TIModel(gdir, mb_model_class=TIModel,\n filename=climate_filename,\n use_inversion_flowlines=use_inversion_flowlines,\n input_filesuffix=climate_input_filesuffix,\n melt_f=melt_f, prcp_fac=pf,\n **kwargs)\n else:\n mb = MultipleFlowlineMassBalance_TIModel(gdir, mb_model_class=TIModel,\n filename=climate_filename,\n use_inversion_flowlines=use_inversion_flowlines,\n input_filesuffix=climate_input_filesuffix,\n **kwargs)\n\n if years is None:\n if ys is None:\n ys = mb.flowline_mb_models[0].ys\n if ye is None:\n ye = mb.flowline_mb_models[0].ye\n years = np.arange(ys, ye + 1)\n\n\n odf = pd.Series(data=mb.get_specific_mb(year=years),\n index=years)\n return odf\n\n\nfrom oggm.utils._workflow import global_task\n@global_task(log)\ndef compile_fixed_geometry_mass_balance_TIModel(gdirs, filesuffix='',\n path=True, csv=False,\n use_inversion_flowlines=True,\n ys=None, ye=None, years=None,\n ds_gcm=None,\n **kwargs):\n \"\"\"Compiles a table of specific mass-balance timeseries for all glaciers.\n\n The file is stored in a hdf file (not csv) per default. Use pd.read_hdf\n to open it.\n\n Parameters\n ----------\n gdirs : list of :py:class:`oggm.GlacierDirectory` objects\n the glacier directories to process\n filesuffix : str\n add suffix to output file\n path : str, bool\n Set to \"True\" in order to store the info in the working directory\n Set to a path to store the file to your chosen location (file\n extension matters)\n csv: bool\n Set to store the data in csv instead of hdf.\n use_inversion_flowlines : bool\n whether to use the inversion flowlines or the model flowlines\n ys : int\n start year of the model run (default: from the climate file)\n date)\n ye : int\n end year of the model run (default: from the climate file)\n years : array of ints\n override ys and ye with the years of your choice\n \"\"\"\n from oggm.workflow import execute_entity_task\n #from oggm.core.massbalance import fixed_geometry_mass_balance\n\n out_df = execute_entity_task(fixed_geometry_mass_balance_TIModel, gdirs,\n use_inversion_flowlines=use_inversion_flowlines,\n ys=ys, ye=ye, years=years,\n ds_gcm=ds_gcm, **kwargs)\n\n for idx, s in enumerate(out_df):\n if s is None:\n out_df[idx] = pd.Series(np.NaN)\n\n out = pd.concat(out_df, axis=1, keys=[gd.rgi_id for gd in gdirs])\n out = out.dropna(axis=0, how='all')\n\n if path:\n if path is True:\n fpath = os.path.join(cfg.PATHS['working_dir'],\n 'fixed_geometry_mass_balance' + filesuffix)\n if csv:\n out.to_csv(fpath + '.csv')\n else:\n out.to_hdf(fpath + '.hdf', key='df')\n else:\n ext = os.path.splitext(path)[-1]\n if ext.lower() == '.csv':\n out.to_csv(path)\n elif ext.lower() == '.hdf':\n out.to_hdf(path, key='df')\n return out\n\n\ndef extend_past_climate_run_TIModel(past_run_file=None,\n fixed_geometry_mb_file=None,\n glacier_statistics_file=None,\n path=False,\n use_compression=True):\n \"\"\"Utility function to extend past MB runs prior to the RGI date.\n\n We use a fixed geometry (and a fixed calving rate) for all dates prior\n to the RGI date.\n\n This is not parallelized, i.e a bit slow.\n\n Parameters\n ----------\n past_run_file : 
str\n path to the historical run (nc)\n fixed_geometry_mb_file : str\n path to the MB file (csv)\n glacier_statistics_file : str\n path to the glacier stats file (csv)\n path : str\n where to store the file\n use_compression : bool\n\n Returns\n -------\n the extended dataset\n \"\"\"\n\n log.workflow('Applying extend_past_climate_run on '\n '{}'.format(past_run_file))\n\n fixed_geometry_mb_df = pd.read_csv(fixed_geometry_mb_file, index_col=0,\n low_memory=False)\n stats_df = pd.read_csv(glacier_statistics_file, index_col=0,\n low_memory=False)\n\n with xr.open_dataset(past_run_file) as past_ds:\n\n # We need at least area and vol to do something\n if 'volume' not in past_ds.data_vars or 'area' not in past_ds.data_vars:\n raise InvalidWorkflowError('Need both volume and area to proceed')\n\n y0_run = int(past_ds.time[0])\n y1_run = int(past_ds.time[-1])\n if (y1_run - y0_run + 1) != len(past_ds.time):\n raise NotImplementedError('Currently only supports annual outputs')\n y0_clim = int(fixed_geometry_mb_df.index[0])\n y1_clim = int(fixed_geometry_mb_df.index[-1])\n if y0_clim > y0_run or y1_clim < y0_run:\n raise InvalidWorkflowError('Dates do not match.')\n if y1_clim != y1_run - 1:\n raise InvalidWorkflowError('Dates do not match.')\n if len(past_ds.rgi_id) != len(fixed_geometry_mb_df.columns):\n raise InvalidWorkflowError('Nb of glaciers do not match.')\n if len(past_ds.rgi_id) != len(stats_df.index):\n raise InvalidWorkflowError('Nb of glaciers do not match.')\n\n # Make sure we agree on order\n df = fixed_geometry_mb_df[past_ds.rgi_id]\n\n # Output data\n years = np.arange(y0_clim, y1_run+1)\n ods = past_ds.reindex({'time': years})\n\n # Time\n ods['hydro_year'].data[:] = years\n ods['hydro_month'].data[:] = ods['hydro_month'][-1]\n if ods['hydro_month'][-1] == 1:\n ods['calendar_year'].data[:] = years\n else:\n ods['calendar_year'].data[:] = years - 1\n ods['calendar_month'].data[:] = ods['calendar_month'][-1]\n for vn in ['hydro_year', 'hydro_month',\n 'calendar_year', 'calendar_month']:\n ods[vn] = ods[vn].astype(int)\n\n # New vars\n for vn in ['volume', 'volume_bsl', 'volume_bwl',\n 'area', 'length', 'calving', 'calving_rate']:\n if vn in ods.data_vars:\n ods[vn + '_ext'] = ods[vn].copy(deep=True)\n ods[vn + '_ext'].attrs['description'] += ' (extended with MB data)'\n\n vn = 'volume_fixed_geom_ext'\n ods[vn] = ods['volume'].copy(deep=True)\n ods[vn].attrs['description'] += ' (replaced with fixed geom data)'\n\n rho = cfg.PARAMS['ice_density']\n # Loop over the ids\n for i, rid in enumerate(ods.rgi_id.data):\n # Both do not need to be same length but they need to start same\n mb_ts = df.values[:, i]\n orig_vol_ts = ods.volume_ext.data[:, i]\n if not (np.isfinite(mb_ts[-1]) and np.isfinite(orig_vol_ts[-1])):\n # Not a valid glacier\n continue\n if np.isfinite(orig_vol_ts[0]):\n # Nothing to extend, really\n continue\n\n # First valid id\n fid = np.argmax(np.isfinite(orig_vol_ts))\n\n # Add calving to the mix\n try:\n calv_flux = stats_df.loc[rid, 'calving_flux'] * 1e9\n calv_rate = stats_df.loc[rid, 'calving_rate_myr']\n except KeyError:\n calv_flux = 0\n calv_rate = 0\n if not np.isfinite(calv_flux):\n calv_flux = 0\n if not np.isfinite(calv_rate):\n calv_rate = 0\n\n # Fill area and length which stays constant before date\n orig_area_ts = ods.area_ext.data[:, i]\n orig_area_ts[:fid] = orig_area_ts[fid]\n\n # We convert SMB to volume\n mb_vol_ts = (mb_ts / rho * orig_area_ts[fid] - calv_flux).cumsum()\n calv_ts = (mb_ts * 0 + calv_flux).cumsum()\n\n # The -1 is because the volume 
change is known at end of year\n mb_vol_ts = mb_vol_ts + orig_vol_ts[fid] - mb_vol_ts[fid-1]\n\n # Now back to netcdf\n ods.volume_fixed_geom_ext.data[1:, i] = mb_vol_ts\n ods.volume_ext.data[1:fid, i] = mb_vol_ts[0:fid-1]\n ods.area_ext.data[:, i] = orig_area_ts\n\n # Optional variables\n if 'length' in ods.data_vars:\n orig_length_ts = ods.length_ext.data[:, i]\n orig_length_ts[:fid] = orig_length_ts[fid]\n ods.length_ext.data[:, i] = orig_length_ts\n\n if 'calving' in ods.data_vars:\n orig_calv_ts = ods.calving_ext.data[:, i]\n # The -1 is because the volume change is known at end of year\n calv_ts = calv_ts + orig_calv_ts[fid] - calv_ts[fid-1]\n ods.calving_ext.data[1:fid, i] = calv_ts[0:fid-1]\n\n if 'calving_rate' in ods.data_vars:\n orig_calv_rate_ts = ods.calving_rate_ext.data[:, i]\n # +1 because calving rate at year 0 is unkown from the dyns model\n orig_calv_rate_ts[:fid+1] = calv_rate\n ods.calving_rate_ext.data[:, i] = orig_calv_rate_ts\n\n # Extend vol bsl by assuming that % stays constant\n if 'volume_bsl' in ods.data_vars:\n bsl = ods.volume_bsl.data[fid, i] / ods.volume.data[fid, i]\n ods.volume_bsl_ext.data[:fid, i] = bsl * ods.volume_ext.data[:fid, i]\n if 'volume_bwl' in ods.data_vars:\n bwl = ods.volume_bwl.data[fid, i] / ods.volume.data[fid, i]\n ods.volume_bwl_ext.data[:fid, i] = bwl * ods.volume_ext.data[:fid, i]\n\n # Remove old vars\n for vn in list(ods.data_vars):\n if '_ext' not in vn and 'time' in ods[vn].dims:\n del ods[vn]\n\n # Rename vars to their old names\n ods = ods.rename(dict((o, o.replace('_ext', ''))\n for o in ods.data_vars))\n\n # Remove t0 (which is NaN)\n ods = ods.isel(time=slice(1, None))\n\n # To file?\n if path:\n enc_var = {'dtype': 'float32'}\n if use_compression:\n enc_var['complevel'] = 5\n enc_var['zlib'] = True\n encoding = {v: enc_var for v in ods.data_vars}\n ods.to_netcdf(path, encoding=encoding)\n\n return ods\n", "sub_path": "MBsandbox/mbmod_daily_oneflowline.py", "file_name": "mbmod_daily_oneflowline.py", "file_ext": "py", "file_size_in_byte": 84043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 49, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 55, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 113, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 125, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 128, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 130, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 130, "usage_type": "name"}, {"api_name": "pandas.DatetimeIndex", "line_number": 136, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 149, "usage_type": "call"}, {"api_name": "oggm.utils.ncDataset", "line_number": 151, "usage_type": "call"}, {"api_name": "oggm.utils._funcs.haversine", "line_number": 155, "usage_type": "call"}, {"api_name": "netCDF4.date2num", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "attribute"}, 
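An illustrative, standalone sketch (not part of the dataset record above) of the pseudo-daily temperature-for-melt approximation that the record's _get_tempformelt method implements: the monthly mean temperature above the melt threshold is spread into N synthetic "daily" values via standard-normal quantiles scaled by the month's temperature standard deviation, clipped at zero, and averaged back to one value per month. All names and the choice of N here are assumptions for illustration only, not the record's API.

import numpy as np
from scipy import stats


def monthly_tempformelt_with_std(temp_monthly, temp_std, t_melt=0.0, n=100):
    # temperature above the melt threshold, shape (1, n_months)
    tfm = np.atleast_2d(np.asarray(temp_monthly, dtype=float) - t_melt)
    # n standard-normal quantiles, symmetric around the mean (zero)
    z = stats.norm.ppf(np.arange(1 / n - 1 / (2 * n), 1, 1 / n))
    # scale the quantiles by each month's daily temperature variability
    z_std = np.outer(z, np.atleast_1d(temp_std))   # shape (n, n_months)
    daily = tfm + z_std                            # synthetic daily values
    np.clip(daily, 0, None, out=daily)             # negative melt energy -> 0
    return daily.mean(axis=0)                      # back to one value per month


# example: a month at +1 deg C mean with 4 deg C daily spread still yields melt,
# while a -3 deg C month contributes a small but non-zero amount:
# print(monthly_tempformelt_with_std([1.0, -3.0], [4.0, 4.0]))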
{"api_name": "netCDF4.date2num", "line_number": 182, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 204, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 218, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 224, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 293, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 294, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 295, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 298, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 299, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 300, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 301, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 308, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 313, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 313, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 315, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 315, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 326, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 355, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 369, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 404, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 422, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 445, "usage_type": "call"}, {"api_name": "oggm.entity_task", "line_number": 245, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 509, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 511, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 515, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 520, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 520, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 522, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 522, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 533, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 561, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 583, "usage_type": "call"}, {"api_name": "oggm.shop.ecmwf.BASENAMES", "line_number": 589, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 591, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 592, "usage_type": "call"}, {"api_name": "oggm.cfg.G", "line_number": 603, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 603, "usage_type": "name"}, {"api_name": "oggm.shop.ecmwf.get_ecmwf_file", "line_number": 608, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 609, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 628, "usage_type": "call"}, {"api_name": 
"oggm.entity_task", "line_number": 467, "usage_type": "call"}, {"api_name": "oggm.core.massbalance.MassBalanceModel", "line_number": 645, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_YEAR", "line_number": 659, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_MONTH", "line_number": 660, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_DAY", "line_number": 661, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 752, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 758, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 776, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 776, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_YEAR", "line_number": 782, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_MONTH", "line_number": 783, "usage_type": "name"}, {"api_name": "oggm.cfg.SEC_IN_DAY", "line_number": 784, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 816, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 821, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 827, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 831, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 834, "usage_type": "attribute"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 840, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 850, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 850, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 852, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 852, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 858, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 862, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 887, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 890, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 896, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 902, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 902, "usage_type": "call"}, {"api_name": "oggm.utils.clip_array", "line_number": 906, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 918, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 918, "usage_type": "call"}, {"api_name": "oggm.utils.clip_array", "line_number": 923, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 926, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 929, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 933, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 940, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 946, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 977, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 1055, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1076, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1078, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 1079, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 1080, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1090, "usage_type": 
"call"}, {"api_name": "numpy.all", "line_number": 1094, "usage_type": "call"}, {"api_name": "oggm.utils.ncDataset", "line_number": 1103, "usage_type": "call"}, {"api_name": "oggm.utils", "line_number": 1103, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1119, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 1123, "usage_type": "call"}, {"api_name": "oggm.utils.ncDataset", "line_number": 1130, "usage_type": "call"}, {"api_name": "oggm.utils", "line_number": 1130, "usage_type": "name"}, {"api_name": "oggm.utils.floatyear_to_date", "line_number": 1165, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1176, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1180, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 1182, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1185, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 1194, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1197, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 1199, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1203, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1209, "usage_type": "call"}, {"api_name": "oggm.utils.clip_array", "line_number": 1211, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1215, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1219, "usage_type": "call"}, {"api_name": "oggm.utils.clip_array", "line_number": 1221, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 1232, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 1289, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1289, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 1290, "usage_type": "attribute"}, {"api_name": "scipy.stats.norm.ppf", "line_number": 1293, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 1293, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 1293, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 1293, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 1296, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1296, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1297, "usage_type": "call"}, {"api_name": "numpy.atleast_3d", "line_number": 1303, "usage_type": "call"}, {"api_name": "numpy.atleast_3d", "line_number": 1304, "usage_type": "call"}, {"api_name": "oggm.utils.clip_min", "line_number": 1305, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1308, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 1309, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1310, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 1310, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 1311, "usage_type": "call"}, {"api_name": "oggm.utils.clip_min", "line_number": 1313, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 1319, "usage_type": "call"}, {"api_name": "oggm.utils.clip_min", "line_number": 1322, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1366, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 1368, "usage_type": "call"}, {"api_name": "numpy.sum", 
"line_number": 1403, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 1421, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 1445, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidParamsError", "line_number": 1455, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 1460, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 1463, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 1466, "usage_type": "call"}, {"api_name": "oggm.cfg.SEC_IN_DAY", "line_number": 1466, "usage_type": "name"}, {"api_name": "oggm.core.massbalance.MassBalanceModel", "line_number": 1471, "usage_type": "name"}, {"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1530, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 1549, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 1549, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 1651, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1652, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1653, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 1667, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 1669, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1677, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1680, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1682, "usage_type": "call"}, {"api_name": "oggm.cfg.SEC_IN_YEAR", "line_number": 1682, "usage_type": "name"}, {"api_name": "numpy.average", "line_number": 1684, "usage_type": "call"}, {"api_name": "numpy.atleast_1d", "line_number": 1691, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 1692, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1698, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 1700, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1700, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 1702, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1764, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1767, "usage_type": "call"}, {"api_name": "oggm.entity_task", "line_number": 1705, "usage_type": "call"}, {"api_name": "oggm.workflow.execute_entity_task", "line_number": 1810, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1817, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 1817, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 1819, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1824, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1824, "usage_type": "attribute"}, {"api_name": "oggm.cfg.PATHS", "line_number": 1824, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 1824, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 1831, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1831, "usage_type": "attribute"}, {"api_name": "oggm.utils._workflow.global_task", "line_number": 1773, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1871, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1873, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 1876, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1880, "usage_type": "call"}, 
{"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1889, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1891, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1893, "usage_type": "call"}, {"api_name": "oggm.exceptions.InvalidWorkflowError", "line_number": 1895, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1901, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 1927, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 1927, "usage_type": "name"}, {"api_name": "numpy.isfinite", "line_number": 1933, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 1936, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 1941, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 1941, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 1950, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 1952, "usage_type": "call"}]} +{"seq_id": "478032224", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django_engvtweb.engvtweb import settings\n\nadmin.autodiscover()\n\nimport views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'gettingstarted.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', views.index, name='index'),\n # url(r'^db', hello.views.db, name='db'),\n url(r'^grappelli/', include('grappelli.urls')),\n url(r'^team_order/', include('django_engvtweb.team_order.urls', namespace='team_order')),\n url(r'^cart/', include('django_engvtweb.cart.urls', namespace='cart')),\n url(r'^search/', include('haystack.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^accounts/', include('django.contrib.auth.urls')),\n url(r'^my-account/', views.my_account, name='my-account'),\n)\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += patterns('',\n url(r'^__debug__/', include(debug_toolbar.urls)),\n )", "sub_path": "django_engvtweb/engvtweb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 5, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "views.index", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", 
"line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "views.my_account", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django_engvtweb.engvtweb.settings.DEBUG", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django_engvtweb.engvtweb.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "debug_toolbar.urls", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "302637190", "text": "from datetime import datetime\nimport re, sys, uuid, requests\n\nfrom flask import request, current_app\nfrom sqlalchemy import func\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm import validates\nfrom sqlalchemy.schema import FetchedValue\nfrom app.extensions import db\n\nfrom app.api.utils.models_mixins import SoftDeleteMixin, AuditMixin, Base\n\nfrom app.api.parties.party.models.party import Party\nfrom app.api.parties.party_appt.models.mine_party_appt_document_xref import MinePartyApptDocumentXref\nfrom app.api.constants import PERMIT_LINKED_CONTACT_TYPES\n\n\nclass MinePartyAppointment(SoftDeleteMixin, AuditMixin, Base):\n __tablename__ = \"mine_party_appt\"\n # Columns\n mine_party_appt_id = db.Column(db.Integer, primary_key=True, server_default=FetchedValue())\n mine_party_appt_guid = db.Column(UUID(as_uuid=True), server_default=FetchedValue())\n mine_guid = db.Column(UUID(as_uuid=True), db.ForeignKey('mine.mine_guid'))\n party_guid = db.Column(UUID(as_uuid=True), db.ForeignKey('party.party_guid'))\n mine_party_appt_type_code = db.Column(\n db.String(3), db.ForeignKey('mine_party_appt_type_code.mine_party_appt_type_code'))\n start_date = db.Column(db.DateTime)\n end_date = db.Column(db.DateTime)\n processed_by = db.Column(db.String(60), server_default=FetchedValue())\n processed_on = db.Column(db.DateTime, nullable=False, server_default=FetchedValue())\n\n #type specific foreign keys\n mine_tailings_storage_facility_guid = db.Column(\n UUID(as_uuid=True),\n db.ForeignKey('mine_tailings_storage_facility.mine_tailings_storage_facility_guid'))\n permit_id = db.Column(db.Integer, db.ForeignKey('permit.permit_id'))\n permit = db.relationship('Permit', lazy='select')\n\n # Relationships\n party = db.relationship('Party', lazy='joined')\n\n mine_party_appt_type = db.relationship(\n 'MinePartyAppointmentType',\n backref='mine_party_appt',\n order_by='desc(MinePartyAppointmentType.display_order)',\n lazy='joined')\n\n documents = db.relationship(\n 'MineDocument', lazy='joined', secondary='mine_party_appt_document_xref')\n\n def assign_related_guid(self, related_guid):\n from app.api.mines.permits.permit.models.permit import Permit\n\n if self.mine_party_appt_type_code == \"EOR\":\n self.mine_tailings_storage_facility_guid = related_guid\n\n if self.mine_party_appt_type_code in PERMIT_LINKED_CONTACT_TYPES:\n permit = Permit.find_by_permit_guid(related_guid)\n if not permit:\n raise AssertionError(f'Permit with guid {related_guid} 
not found')\n self.permit_id = permit.permit_id\n return\n\n def save(self, commit=True):\n if commit:\n if not (self.permit or self.permit_id or self.mine_guid or self.mine):\n raise AssertionError(\"Must have a related permit or mine\")\n\n if self.mine_party_appt_type_code == 'PMT' and (self.mine_guid\n or self.mine) is not None:\n raise AssertionError(\"Contacts linked to a permit are not related to mines\")\n\n if self.mine_party_appt_type_code in [\n 'THD', 'LDO', 'MOR'\n ] and (self.mine_guid or self.mine) is not None and (self.permit_id\n or self.permit) is not None:\n raise AssertionError(\"Contacts linked to a permit are not related to mines\")\n\n super(MinePartyAppointment, self).save(commit)\n\n def json(self, relationships=[]):\n result = {\n 'mine_party_appt_guid': str(self.mine_party_appt_guid),\n 'mine_guid': str(self.mine_guid) if self.mine_guid else None,\n 'party_guid': str(self.party_guid),\n 'mine_party_appt_type_code': str(self.mine_party_appt_type_code),\n 'start_date': str(self.start_date) if self.start_date else None,\n 'end_date': str(self.end_date) if self.end_date else None,\n 'documents': [doc.json() for doc in self.documents]\n }\n if 'party' in relationships:\n result.update({'party': self.party.json(show_mgr=False) if self.party else str({})})\n related_guid = \"\"\n if self.mine_party_appt_type_code == \"EOR\":\n related_guid = str(self.mine_tailings_storage_facility_guid)\n elif self.mine_party_appt_type_code in PERMIT_LINKED_CONTACT_TYPES and self.permit:\n related_guid = str(self.permit.permit_guid)\n result[\"related_guid\"] = related_guid\n return result\n\n # search methods\n @classmethod\n def find_by_mine_party_appt_guid(cls, _id):\n try:\n return cls.query.filter_by(mine_party_appt_guid=_id).filter_by(\n deleted_ind=False).first()\n except ValueError:\n return None\n\n # FIXME: This is only being used in one test, and is broken by permittee changes. 
Remove?\n @classmethod\n def find_by_mine_guid(cls, _id):\n try:\n return cls.find_by(mine_guid=_id)\n except ValueError:\n return None\n\n @classmethod\n def find_by_party_guid(cls, _id):\n try:\n return cls.find_by(party_guid=_id)\n except ValueError:\n return None\n\n @classmethod\n def find_by_permit_id(cls, _id):\n return cls.query.filter_by(permit_id=_id).filter_by(deleted_ind=False).all()\n\n\n# given a permmit, and an issue date of a new amendment, order appointment start_dates\n# return the all appointment start_dates in order\n\n @classmethod\n def find_appointment_end_dates(cls, _id, issue_datetime):\n start_dates = [issue_datetime]\n appointments = cls.find_by_permit_id(_id)\n for appointment in appointments:\n start_dates.append(appointment.start_date)\n ordered_dates = sorted(start_dates, reverse=True)\n return ordered_dates\n\n @classmethod\n def find_parties_by_mine_party_appt_type_code(cls, code):\n try:\n return cls.find_by(mine_party_appt_type_codes=[code])\n except ValueError:\n return None\n\n @classmethod\n def find_current_appointments(cls,\n mine_guid=None,\n mine_party_appt_type_code=None,\n permit_id=None,\n mine_tailings_storage_facility_guid=None):\n built_query = cls.query.filter_by(deleted_ind=False, mine_guid=mine_guid, end_date=None)\n if permit_id:\n built_query = built_query.filter_by(permit_id=permit_id)\n if mine_tailings_storage_facility_guid:\n built_query = built_query.filter_by(\n mine_tailings_storage_facility_guid=mine_tailings_storage_facility_guid)\n if isinstance(mine_party_appt_type_code, list):\n built_query = built_query.filter(\n cls.mine_party_appt_type_code.in_(mine_party_appt_type_code))\n else:\n built_query = built_query.filter_by(mine_party_appt_type_code=mine_party_appt_type_code)\n return built_query.all()\n\n @classmethod\n def find_by(cls,\n mine_guid=None,\n party_guid=None,\n mine_party_appt_type_codes=None,\n include_permittees=False,\n active_only=True):\n built_query = cls.query.filter_by(deleted_ind=False)\n if mine_guid:\n built_query = built_query.filter_by(mine_guid=mine_guid)\n if party_guid:\n built_query = built_query.filter_by(party_guid=party_guid)\n if mine_party_appt_type_codes:\n built_query = built_query.filter(\n cls.mine_party_appt_type_code.in_(mine_party_appt_type_codes))\n results = built_query.all()\n\n if include_permittees and mine_guid:\n #avoid circular imports.\n from app.api.mines.mine.models.mine import Mine\n mine = Mine.find_by_mine_guid(mine_guid)\n permit_permittees = []\n for mp in mine.mine_permit:\n if not active_only:\n permit_permittees = permit_permittees + mp.permittee_appointments\n else:\n for pa in mp.permittee_appointments:\n if pa.end_date is None or (\n (pa.start_date is None or pa.start_date <= datetime.utcnow().date())\n and pa.end_date >= datetime.utcnow().date()):\n permit_permittees.append(pa)\n results = results + permit_permittees\n return results\n\n @classmethod\n def to_csv(cls, records, columns):\n rows = [','.join(columns)]\n for record in records:\n row = []\n for column in columns:\n row.append(str(getattr(record, column)))\n rows.append(','.join(row))\n return '\\n'.join(rows)\n\n @classmethod\n def create(cls,\n mine,\n party_guid,\n processed_by,\n mine_party_appt_type_code,\n start_date=None,\n end_date=None,\n permit=None,\n add_to_session=True):\n mpa = cls(\n mine=mine,\n party_guid=party_guid,\n mine_party_appt_type_code=mine_party_appt_type_code,\n start_date=start_date,\n end_date=end_date,\n processed_by=processed_by)\n if mine_party_appt_type_code in 
PERMIT_LINKED_CONTACT_TYPES or permit:\n mpa.permit = permit\n if add_to_session:\n mpa.save(commit=False)\n return mpa\n\n # validators\n @validates('party_guid')\n def validate_party_guid(self, key, val):\n if not val:\n raise AssertionError('No party guid provided.')\n return val\n\n @validates('mine_party_appt_type_code')\n def validate_mine_party_appt_type_code(self, key, val):\n if not val:\n raise AssertionError('No mine party appointment type code')\n if len(val) is not 3:\n raise AssertionError('invalid mine party appointment type code')\n return val\n\n @validates('mine_tailings_storage_facility_guid')\n def validate_mine_tailings_storage_facility_guid(self, key, val):\n if self.mine_party_appt_type_code == 'EOR':\n if not val:\n raise AssertionError(\n 'No mine_tailings_storage_facility_guid, but mine_party_appt_type_code is EOR.')\n return val", "sub_path": "services/core-api/app/api/parties/party_appt/models/mine_party_appt.py", "file_name": "mine_party_appt.py", "file_ext": "py", "file_size_in_byte": 10617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "app.api.utils.models_mixins.SoftDeleteMixin", "line_number": 19, "usage_type": "name"}, {"api_name": "app.api.utils.models_mixins.AuditMixin", "line_number": 19, "usage_type": "name"}, {"api_name": "app.api.utils.models_mixins.Base", "line_number": 19, "usage_type": "name"}, {"api_name": "app.extensions.db.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 22, "usage_type": "name"}, {"api_name": "app.extensions.db.Integer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sqlalchemy.schema.FetchedValue", "line_number": 22, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.FetchedValue", "line_number": 23, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 24, "usage_type": "call"}, {"api_name": "app.extensions.db.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 25, "usage_type": "call"}, {"api_name": "app.extensions.db.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 26, "usage_type": "name"}, {"api_name": "app.extensions.db.String", "line_number": 27, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 27, "usage_type": "name"}, {"api_name": "app.extensions.db.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 28, "usage_type": "name"}, {"api_name": "app.extensions.db.DateTime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.extensions.db.Column", "line_number": 29, "usage_type": "call"}, {"api_name": 
"app.extensions.db", "line_number": 29, "usage_type": "name"}, {"api_name": "app.extensions.db.DateTime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.extensions.db.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 30, "usage_type": "name"}, {"api_name": "app.extensions.db.String", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.schema.FetchedValue", "line_number": 30, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 31, "usage_type": "name"}, {"api_name": "app.extensions.db.DateTime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlalchemy.schema.FetchedValue", "line_number": 31, "usage_type": "call"}, {"api_name": "app.extensions.db.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 35, "usage_type": "call"}, {"api_name": "app.extensions.db.ForeignKey", "line_number": 36, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 36, "usage_type": "name"}, {"api_name": "app.extensions.db.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 37, "usage_type": "name"}, {"api_name": "app.extensions.db.Integer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "app.extensions.db.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "app.extensions.db.relationship", "line_number": 38, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 38, "usage_type": "name"}, {"api_name": "app.extensions.db.relationship", "line_number": 41, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 41, "usage_type": "name"}, {"api_name": "app.extensions.db.relationship", "line_number": 43, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 43, "usage_type": "name"}, {"api_name": "app.extensions.db.relationship", "line_number": 49, "usage_type": "call"}, {"api_name": "app.extensions.db", "line_number": 49, "usage_type": "name"}, {"api_name": "app.api.constants.PERMIT_LINKED_CONTACT_TYPES", "line_number": 58, "usage_type": "name"}, {"api_name": "app.api.mines.permits.permit.models.permit.Permit.find_by_permit_guid", "line_number": 59, "usage_type": "call"}, {"api_name": "app.api.mines.permits.permit.models.permit.Permit", "line_number": 59, "usage_type": "name"}, {"api_name": "app.api.constants.PERMIT_LINKED_CONTACT_TYPES", "line_number": 97, "usage_type": "name"}, {"api_name": "app.api.mines.mine.models.mine.Mine.find_by_mine_guid", "line_number": 189, "usage_type": "call"}, {"api_name": "app.api.mines.mine.models.mine.Mine", "line_number": 189, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 197, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "name"}, {"api_name": "app.api.constants.PERMIT_LINKED_CONTACT_TYPES", "line_number": 230, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 237, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 243, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.validates", "line_number": 251, "usage_type": 
"call"}]} +{"seq_id": "12110268", "text": "from django.test import TestCase\n\nfrom oscar.apps.partner import availability\nfrom oscar.test import factories, decorators\n\n\n@decorators.ignore_deprecation_warnings\nclass TestDelegateToStockRecordWrapper(TestCase):\n\n def setUp(self):\n self.product = factories.create_product()\n self.stockrecord = factories.create_stockrecord(self.product)\n self.assertTrue(self.product.get_product_class().track_stock)\n\n self.availability = availability.DelegateToStockRecord(\n self.product, self.stockrecord)\n\n def test_delegates_is_available_to_buy(self):\n self.assertEquals(\n self.stockrecord.is_available_to_buy,\n self.availability.is_available_to_buy)\n\n def test_delegates_is_purchase_permitted(self):\n self.assertEquals(\n self.stockrecord.is_purchase_permitted(1),\n self.availability.is_purchase_permitted(quantity=1))\n\n def test_delegates_availability_code(self):\n self.assertEquals(\n self.stockrecord.availability_code,\n self.availability.code)\n\n def test_delegates_availability_message(self):\n self.assertEquals(\n self.stockrecord.availability,\n self.availability.message)\n\n def test_delegates_dispatch_date(self):\n self.assertEquals(\n self.stockrecord.dispatch_date,\n self.availability.dispatch_date)\n", "sub_path": "tests/integration/partner/availability_tests.py", "file_name": "availability_tests.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.test.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "oscar.test.factories.create_product", "line_number": 11, "usage_type": "call"}, {"api_name": "oscar.test.factories", "line_number": 11, "usage_type": "name"}, {"api_name": "oscar.test.factories.create_stockrecord", "line_number": 12, "usage_type": "call"}, {"api_name": "oscar.test.factories", "line_number": 12, "usage_type": "name"}, {"api_name": "oscar.apps.partner.availability.DelegateToStockRecord", "line_number": 15, "usage_type": "call"}, {"api_name": "oscar.apps.partner.availability", "line_number": 15, "usage_type": "name"}, {"api_name": "oscar.test.decorators.ignore_deprecation_warnings", "line_number": 7, "usage_type": "attribute"}, {"api_name": "oscar.test.decorators", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "557894936", "text": "from spacy.lang.en import English\nfrom bert_serving.client import BertClient\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import pairwise_distances_argmin_min\n\n\nimport spacy\nimport util_bert as util\nimport os\nimport numpy as np\n\n\nDOCS_DIR = \"/home/nishitasnani/cs221_summarization/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs/\"\n\n\ndef start_bert_client():\n bc = BertClient()\n return bc\n\n\ndef fetch_all_docs(path):\n docs = []\n for doc in os.listdir(path):\n fp = open(path + \"/\" + doc, \"r\")\n flag = 0\n doc_text = ''\n for line in fp.readlines():\n # print(\"-->\", line)\n # if line.strip() == \"\":\n # flag = 0\n # break\n #elif flag == 1:\n doc_text += (line.strip() + ' ')\n #elif line.strip() == \"\":\n # flag = 1\n #else:\n # continue\n \n docs.append(doc_text)\n return docs\n\n\ndef fetch_bert_embeddings(topic_docs):\n \"\"\"\n Fetch BERT embeddings of all sentences within topic_docs\n \"\"\"\n embeddings = []\n sentences = []\n for doc in topic_docs:\n doc_sents = util.split_into_sentences(nlp, doc)\n sentences.extend(doc_sents)\n doc_embs = 
bc.encode(doc_sents)\n        # print(len(doc_embs[0]))\n        embeddings.extend(doc_embs)\n    embeddings = np.array(embeddings)\n    return embeddings, sentences\n\n    \nif __name__ == '__main__':\n    bc = start_bert_client()\n    nlp = English()\n    nlp.add_pipe(nlp.create_pipe('sentencizer')) # updated\n    topic_codes = ['d30020t']\n    for code in topic_codes:\n        topic_docs = fetch_all_docs(DOCS_DIR + code)\n        print(len(topic_docs))\n        # print(topic_docs[0])\n        embeddings, sentences = fetch_bert_embeddings(topic_docs)\n        print(embeddings.shape)\n        km = KMeans(n_clusters=6).fit(embeddings)\n        closest, _ = pairwise_distances_argmin_min(\n            km.cluster_centers_, embeddings)\n        print(closest.shape)\n        closest = sorted(closest)\n        for idx in closest:\n            print(sentences[idx])\n", "sub_path": "argument_classification/acl2019-BERT-argument-classification-and-clustering/argument-similarity/bert_clustering.py", "file_name": "bert_clustering.py", "file_ext": "py", "file_size_in_byte": 2103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "bert_serving.client.BertClient", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "util_bert.split_into_sentences", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "spacy.lang.en.English", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise_distances_argmin_min", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "103103526", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('core', '0017_auto_20160419_1927'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='bookingpackagepanel',\n            name='booking_package',\n            field=models.ForeignKey(related_name='booking_package_panel', to='core.BookingPackage'),\n        ),\n    ]\n", "sub_path": "bumper2/core/old_migrations/0018_auto_20160419_1952.py", "file_name": "0018_auto_20160419_1952.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "21879768", "text": "import numpy as np\nimport pandas as pd\nfrom dfply import *\nfrom pprint import pprint\nfrom os import listdir\nfrom os.path import isfile, join\nfrom time import time\nimport datetime\nimport matplotlib.pyplot as plt\nfrom src.utils import rolling_window, NumpyEncoder\nfrom src.GBL import GBLM\nfrom src.MLShepard import MLShepard\nfrom src.MondrianForest import MondrianForest\nfrom src.OARIMA import OARIMA\nfrom src.OSVR import OSVR\nfrom src.RandConLSTM import RandConLSTM\nfrom src.WHLR import WHLR\n\nimport sys\nsys.setrecursionlimit(sys.getrecursionlimit() * 100)\n\nTRAIN_PORTION = .8\n\nDIM = 200\n# DIM = 15\n\n# Uncomment the method 
and its parameters to include the corresponding result\nmethods = {\n 'GBLM': {\n 'class': GBLM,\n 'options': {\n 'dimension': DIM,\n 'epsilon': 5e-3,\n 'forgetting_rate': .59,\n 'p_learning_rate': .008,\n 's_learning_rate': .001,\n 'decay_rate': .25,\n 'oe_penalty': -1.5,\n 'ue_penalty': -1.5,\n 'reward': 1,\n 'epochs': 1\n }\n },\n # 'MLShepard': {\n # 'class': MLShepard,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM,\n # 'minor_days': 3,\n # 'trust_treshold': 4,\n # 'max_point_usage': 5,\n # 'avr_elemwise_dist': 0.04,\n # 'epsilon': 1e-10\n # }\n # },\n # 'OARIMA (ogd)': {\n # 'class': OARIMA,\n # 'options': {\n # 'dimension': DIM,\n # 'lrate': 1e-2,\n # 'epsilon': 1e-10,\n # 'method': 'ogd'\n # }\n # },\n # 'OARIMA (ons)': {\n # 'class': OARIMA,\n # 'options': {\n # 'dimension': DIM,\n # 'lrate': 1e-2,\n # 'epsilon': 1e-10,\n # 'method': 'ons'\n # }\n # },\n # 'OSVR': {\n # 'class': OSVR,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM,\n # 'C': 10,\n # 'kernelParam': 30,\n # 'epsilon': 1e-10\n # }\n # }, RUNNING TIME IS: [ 9.84e-002, -3.39e-003, 2.63e-005, 5.94e-007] @ [n, n**2, n**3, n**4]\n # 'LSTM': {\n # 'class': RandConLSTM,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM,\n # 'epochs': 2,\n # 'batch_size': 128,\n # 'num_layers': 1,\n # 'epsilon': 1e-10,\n # 'hidden_size': 100,\n # 'connectivity': 1\n # }\n # },\n # 'RandConLSTM': {\n # 'class': RandConLSTM,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM,\n # 'epochs': 2,\n # 'batch_size': 128,\n # 'num_layers': 1,\n # 'epsilon': 1e-10,\n # 'hidden_size': 100,\n # 'connectivity': .2\n # }\n # },\n # 'WHLR': {\n # 'class': WHLR,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM,\n # 'avr_elemwise_dist': 0.04,\n # 'learning_rate': 1e-2\n # }\n # },\n # 'MondrianForest': {\n # 'class': MondrianForest,\n # 'options': {\n # 'future_scope': 3,\n # 'dimension': DIM\n # }\n # },\n}\n\nprint('Preparing dataset...')\n# Here is the data directory. 
Each stock/crypto must be stored in a seperated csv file\ndataDir = 'data/stocks'\ndataFiles = {f: join(dataDir, f) for f in listdir(dataDir) if isfile(join(dataDir, f)) and f[-4:] == '.csv' and f not in ['stock_metadata.csv', 'NIFTY50_all.csv']}\nprint(list(dataFiles.keys()))\npriceIndices = {f: pd.read_csv(dataFiles[f]) for f in dataFiles}\n\n# dataFiles = {'dummy1': 1, 'dummy2': 1, 'dummy3': 1, 'dummy4': 1, 'dummy5': 1, 'dummy6': 1}\n# T_SIZE = 3000\n# priceIndices = {\n# f: pd.DataFrame({\n# 'Date': list(range(T_SIZE)),\n# 'Price': np.random.normal(\n# np.random.uniform(70, 300),\n# np.random.uniform(1, 1.5),\n# (T_SIZE,)\n# )\n# }) for f in dataFiles\n# }\n\nprices = {}\npricePartitions = {'train': {}, 'test': {}}\ntrueVals = {}\nintervalLength = float('Inf')\n# intervalLength = 0\n\nfor cryptoID in priceIndices:\n priceIndices[cryptoID].fillna(method='ffill')\n priceIndices[cryptoID][\"Date\"] = priceIndices[cryptoID][\"Date\"].astype(\"datetime64[ns]\")\n priceIndices[cryptoID] = priceIndices[cryptoID] >> arrange(X.Date)\n indexLength = priceIndices[cryptoID].shape[0]\n indexMean = mean(priceIndices[cryptoID][\"Price\"].values)\n prices[cryptoID] = priceIndices[cryptoID][\"Price\"].values + np.random.normal(loc=0, scale=indexMean/500, size=indexLength)\n intervalLength = min(indexLength, intervalLength)\n # intervalLength = min(2000, intervalLength)\n\ncutOff = int(intervalLength * TRAIN_PORTION)\n\nfor cryptoID in priceIndices:\n # if intervalLength != prices[cryptoID].shape[0]:\n # prices[cryptoID] = np.concatenate((\n # prices[cryptoID],\n # np.repeat(prices[cryptoID][-1], intervalLength - prices[cryptoID].shape[0])\n # ))\n \n pricePartitions['train'][cryptoID] = prices[cryptoID][:cutOff]\n pricePartitions['test'][cryptoID] = rolling_window(prices[cryptoID][cutOff:intervalLength], (DIM+1))[:-1]\n trueVals[cryptoID] = prices[cryptoID][cutOff:intervalLength][(DIM+1):]\n\n\nMSE = lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))\nPMSE = lambda truth, estimate, _prices: np.sqrt(np.mean(((truth-estimate)/truth)**2))\nPASE = lambda truth, estimate, _prices: np.mean((np.abs(truth-estimate)/truth))\nDMSE = lambda truth, estimate, prices: np.sqrt(np.mean((np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0]) * (truth-estimate)/truth)**2))\nwrongs = lambda truth, estimate, prices: np.sqrt(np.mean(np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0])))\n# DMSESD = lambda truth, estimate, prices: np.sqrt(np.std((np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0]) * (truth-estimate)/truth)**2))\n# DMSE = lambda truth, estimate, prices: print(*[truth, estimate, prices], sep='\\n')\n\n# methods['MondrianForest']['later_values'] = {'X': pricePartitions['test'], 'f': trueVals}\nimport json\nfor method_name in methods:\n print(\"==================== %s ====================\"%(method_name))\n method = methods[method_name]\n pClass, options = method['class'], method['options']\n model = pClass(**options)\n\n print('Fitting model...')\n startTime = time()\n model.fit({f: pricePartitions['train'][f] for f in dataFiles})\n fittedTime = time()\n\n print('Predicting values...')\n predStartTime = time()\n res = model.predict(pricePartitions['test'], update=True, true_values=trueVals,\n loss_functions={'MSE': MSE, 'PMSE': PMSE, 'PASE': PASE, 'DMSE': DMSE, 'wrongs': wrongs})\n finishedTime = time()\n\n pprint({coin: {l: np.mean(res[1][coin][l]) for l in res[1][coin]} for coin in res[1]})\n\n print('Plotting results...')\n indices = 
np.random.choice(list(dataFiles.keys()), 1, False)\n plt.plot(range((DIM+1)+cutOff, (DIM+1)+cutOff+res[0][indices[0]].shape[0]), res[0][indices[0]])\n plt.plot(range(prices[indices[0]].shape[0]), prices[indices[0]])\n\n learnT = (fittedTime - startTime) * 1000\n predT = (finishedTime - predStartTime) * 1000\n avrPredT = (finishedTime - predStartTime) / (intervalLength-cutOff) * 1000\n totalT = learnT + predT\n timingString = '''\n learning time:\\t%.1f ms\n predicting time:\\t%.1f ms\n prediction/test:\\t%.1f ms\n total time:\\t%.1fms\n '''%(learnT, predT, avrPredT, totalT)\n print(timingString)\n \n print('saving dump...')\n currentTime = datetime.datetime.now()\n dump_file = open('dumps/Results-%s-%s.dmp'%(method_name, currentTime), 'w')\n json.dump(res, dump_file, cls=NumpyEncoder)\n dump_file.close()\n dump_file = open('dumps/Timing-%s-%s.txt'%(method_name, currentTime), 'w')\n dump_file.write(timingString)\n dump_file.close()\n \n\n\nplt.show()\n\n", "sub_path": "master.py", "file_name": "master.py", "file_ext": "py", "file_size_in_byte": 8004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.getrecursionlimit", "line_number": 20, "usage_type": "call"}, {"api_name": "src.GBL.GBLM", "line_number": 30, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "src.utils.rolling_window", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.heaviside", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.heaviside", "line_number": 182, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "time.time", "line_number": 200, "usage_type": "call"}, {"api_name": "time.time", "line_number": 203, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 208, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 227, "usage_type": "call"}, {"api_name": "src.utils.NumpyEncoder", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}]} +{"seq_id": "382590004", "text": "from cvxopt.solvers import qp\r\nfrom cvxopt.base import matrix\r\nimport numpy, pylab, random, math\r\nimport sys, getopt\r\n\r\n\r\ndef linear_kernel(x, y):\r\n \"\"\" Returns the linear kernel function of two vectors x and y. \"\"\"\r\n x_t = numpy.transpose(x)\r\n return numpy.dot(x_t, y) + 1\r\n\r\n\r\ndef polynomial_kernel(x, y):\r\n \"\"\" Returns the polynomial kernel function of two vectors x and y. \"\"\"\r\n exp = 4\r\n x_t = numpy.transpose(x)\r\n return (numpy.dot(x_t, y) + 1) ** exp\r\n\r\n\r\ndef radial_basis_kernel(x, y):\r\n \"\"\" Returns the radial basis kernel function of two vectors x and y. \"\"\"\r\n param = 5\r\n squared_euclidean_distance = 0\r\n for square in numpy.power(numpy.subtract(x, y), 2):\r\n squared_euclidean_distance += square\r\n\r\n return math.exp(-squared_euclidean_distance / (math.pow(2 * param, 2)))\r\n\r\n\r\ndef create_random_classified_test_data(size):\r\n \"\"\" Creates random data points (x, y) with two classes -1 and 1. \"\"\"\r\n b_size = int(size / 2)\r\n a_size = size - b_size\r\n a_size_1 = int(a_size / 2)\r\n a_size_2 = a_size - a_size_1\r\n\r\n class_a = [(random.normalvariate(-1.5, 1), random.normalvariate(0.5, 1), 1.0) for i in range(a_size_1)] + [\r\n (random.normalvariate(1.5, 1), random.normalvariate(0.5, 1), 1.0) for i in range(a_size_2)]\r\n\r\n class_b = [(random.normalvariate(0.0, 0.5), random.normalvariate(-0.5, 0.5), -1.0) for i in range(b_size)]\r\n\r\n return class_a, class_b\r\n\r\n\r\ndef plot_data_points(class_a, class_b, indicator_list, kernel_function):\r\n \"\"\" Plots two classes of data. 
\"\"\"\r\n pylab.hold(True)\r\n\r\n pylab.plot([p[0] for p in class_a], [p[1] for p in class_a], 'bo')\r\n pylab.plot([p[0] for p in class_b], [p[1] for p in class_b], 'ro')\r\n\r\n\r\ndef create_p_matrix(data, kernel_function):\r\n \"\"\" Creates an N x N matrix P.\r\n P_ij = t_i * t_j * K(x_i, x_j)\r\n N is the number of data points.\r\n K is a kernel function.\r\n t is the class (-1 or 1).\r\n x is a vector with data points.\r\n \"\"\"\r\n\r\n N = len(data)\r\n P = numpy.zeros(shape=(N, N))\r\n\r\n for i in range(N):\r\n for j in range(N):\r\n t_i = data[i][2]\r\n t_j = data[j][2]\r\n x_i = data[i][:2]\r\n x_j = data[j][:2]\r\n\r\n P[i, j] = t_i * t_j * kernel_function(x_i, x_j)\r\n\r\n return P\r\n\r\n\r\ndef create_q_and_h_vectors(N):\r\n \"\"\" Creates the q and h vectors necessary for\r\n calling the qp function and finding an optimal\r\n alpha, as stated in the beginning of this file.\r\n \"\"\"\r\n\r\n q = numpy.empty(N)\r\n q.fill(-1)\r\n\r\n h = numpy.zeros(N)\r\n\r\n return q, h\r\n\r\n\r\ndef create_g_matrix(N):\r\n \"\"\" Creates the G matrix necessary for\r\n calling the qp function and finding an optimal\r\n alpha, as stated in the beginning of this file.\r\n \"\"\"\r\n\r\n G = numpy.zeros(shape=(N, N))\r\n numpy.fill_diagonal(G, -1)\r\n\r\n return G\r\n\r\n\r\ndef find_optimal_alphas(data, kernel_function):\r\n \"\"\" Calls the qp function and finds an optimal\r\n alpha, as explained in the beginning of\r\n this file.\r\n \"\"\"\r\n\r\n N = len(data)\r\n\r\n q, h = create_q_and_h_vectors(N)\r\n G = create_g_matrix(N)\r\n\r\n P = create_p_matrix(data, kernel_function)\r\n\r\n # Call qp. This returns a dictionary data structure. The index 'x' contains the alpha values.\r\n r = qp(matrix(P), matrix(q), matrix(G), matrix(h))\r\n alphas = list(r['x'])\r\n\r\n print(alphas)\r\n\r\n return alphas\r\n\r\n\r\ndef pick_non_zero_alphas_and_create_indicator_list(data, alphas):\r\n \"\"\" Picks the support vector alpha values. \"\"\"\r\n indicator_list = []\r\n threshold = 10e-5\r\n\r\n for i in range(len(alphas)):\r\n alpha = alphas[i]\r\n\r\n if alpha > threshold:\r\n x = data[i][0]\r\n y = data[i][1]\r\n t = data[i][2]\r\n\r\n values = (x, y, t, alpha)\r\n indicator_list.append(values)\r\n\r\n print(\"Found \" + str(len(alphas)) + \" alphas in total.\")\r\n print(\"Found \" + str(len(indicator_list)) + \" that were non-zero.\")\r\n\r\n return indicator_list\r\n\r\n\r\ndef indicator_function(x_star, y_star, indicator_list, kernel_function):\r\n \"\"\" The indicator function can classify new data\r\n points x* = (x, y). If positive, the class is 1. If\r\n negative, the class is -1. A value\r\n between -1 and 1 lies on the margin and this\r\n should not happen. The t_i is the class and\r\n x_i is the data point vector.\r\n ind(x*) = sum( alpha_i * t_i * K(x*, x_i) )\r\n \"\"\"\r\n\r\n N = len(indicator_list)\r\n sum = 0\r\n\r\n for i in range(N):\r\n # The indicator_list contains the alpha, the class (t) and data points (x, y).\r\n alpha_i = indicator_list[i][3]\r\n t_i = indicator_list[i][2]\r\n x_i = indicator_list[i][:2]\r\n\r\n sum += alpha_i * t_i * kernel_function([x_star, y_star], x_i)\r\n\r\n return sum\r\n\r\n\r\ndef plot_decision_boundary(indicator_list, kernel_function):\r\n \"\"\" Plots the decision boundary of the classification. 
\"\"\"\r\n\r\n x_range = numpy.arange(-4, 4, 0.05)\r\n y_range = numpy.arange(-4, 4, 0.05)\r\n\r\n grid = matrix([[indicator_function(x, y, indicator_list, kernel_function) for y in y_range] for x in x_range])\r\n\r\n pylab.contour(x_range, y_range, grid,\r\n (-1.0, 0.0, 1.0),\r\n colors=('red', 'black', 'blue'),\r\n linewidths=(1, 3, 1))\r\n\r\n\r\ndef plot_points_on_margins(indicator_list):\r\n \"\"\" Plots the points on the margin.\r\n This means that they are the support\r\n vectors and that their alphas\r\n are non-zero.\r\n \"\"\"\r\n pylab.plot([p[0] for p in indicator_list], [p[1] for p in indicator_list], 'go')\r\n\r\n\r\ndef run(kernel=radial_basis_kernel, size=10):\r\n # The selected kernel function.\r\n kernel_function = kernel\r\n\r\n # Create random binary classified test data.\r\n class_a, class_b = create_random_classified_test_data(size)\r\n\r\n # Merge the data into one dataset and shuffle it.\r\n data = class_a + class_b\r\n random.shuffle(data)\r\n\r\n # Find the optimal alphas with the given kernel function and data.\r\n alphas = find_optimal_alphas(data, kernel_function)\r\n indicator_list = pick_non_zero_alphas_and_create_indicator_list(data, alphas)\r\n\r\n # Plot the data points and the found decision boundary.\r\n plot_data_points(class_a, class_b, indicator_list, kernel_function)\r\n plot_decision_boundary(indicator_list, kernel_function)\r\n plot_points_on_margins(indicator_list)\r\n pylab.show()\r\n\r\n\r\ndef print_main_help():\r\n \"\"\" Prints help for the input of the main program. \"\"\"\r\n print('assignment.py -k -s ')\r\n\r\n\r\ndef get_kernel_from_input(input):\r\n \"\"\" Validates the input of the kernel function. \"\"\"\r\n functions = {\"linear\": linear_kernel, \"polynomial\": polynomial_kernel, \"radialbasis\": radial_basis_kernel}\r\n if input != \"\" and functions.get(input):\r\n return functions.get(input)\r\n else:\r\n print(\"Valid functions: \" + str(functions.keys()))\r\n print_main_help()\r\n sys.exit(2)\r\n\r\n\r\ndef get_data_size_from_input(input):\r\n \"\"\" Validates the input of the data size. \"\"\"\r\n try:\r\n return int(input)\r\n except ValueError:\r\n print(\"You must supply an integer as data size input.\")\r\n print_main_help()\r\n sys.exit(2)\r\n\r\n\r\ndef main(argv):\r\n \"\"\" Main. Checks all arguments and runs the SVM. 
\"\"\"\r\n kernel_input = \"\"\r\n data_size_input = \"\"\r\n\r\n try:\r\n opts, args = getopt.getopt(argv, \"hk:s:\", [\"kernel=\", \"size=\"])\r\n\r\n except getopt.GetoptError:\r\n print_main_help()\r\n sys.exit(2)\r\n\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n print_main_help()\r\n sys.exit()\r\n elif opt in (\"-k\", \"--kernel\"):\r\n kernel_input = arg\r\n elif opt in (\"-s\", \"--size\"):\r\n data_size_input = arg\r\n\r\n kernel_function = get_kernel_from_input(kernel_input)\r\n data_size = get_data_size_from_input(data_size_input)\r\n\r\n print(\"Kernel: \" + kernel_input)\r\n print(\"Data size: \" + data_size_input)\r\n\r\n run(kernel_function, data_size)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 8140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.transpose", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 24, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 27, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 27, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 37, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 38, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 40, "usage_type": "call"}, {"api_name": "pylab.hold", "line_number": 47, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 98, "usage_type": "call"}, {"api_name": "cvxopt.solvers.qp", "line_number": 117, "usage_type": "call"}, {"api_name": "cvxopt.base.matrix", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 175, "usage_type": "call"}, {"api_name": "cvxopt.base.matrix", "line_number": 177, "usage_type": "call"}, {"api_name": "pylab.contour", "line_number": 179, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 191, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 203, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 213, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 229, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 239, "usage_type": "call"}, {"api_name": "getopt.getopt", "line_number": 248, "usage_type": "call"}, {"api_name": "getopt.GetoptError", "line_number": 250, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 252, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 257, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 273, "usage_type": "attribute"}]} +{"seq_id": "636246024", "text": "import itertools\ndef prime(number) :\n 
if number == 1 or number % 2 == 0 and number != 2 :\n return False\n loopVar = 2\n while loopVar * loopVar <= number :\n if number % loopVar == 0 :\n return False\n loopVar += 1\n return True\ndef listPermutations(number) :\n tempList = list(itertools.permutations(map(int,list(str(number)))))\n ansList = []\n for loopVar in tempList :\n tempNumber = 0\n for loopVar2 in loopVar :\n tempNumber = tempNumber * 10 + loopVar2\n ansList.append(tempNumber)\n return sorted(ansList)[::-1]\n\ndigitCount = 9\nloopCount = 0\nflag = True\nwhile flag :\n digitTest = 0\n permuteList = []\n #print (digitCount)\n for loopVar in range(1,digitCount + 1) :\n loopCount += 1\n digitTest = (digitTest * 10) + loopVar\n #print (digitTest)\n permuteList = listPermutations(int(\"\".join(str(digitTest)[::-1])))\n primeListCheck = list(map(prime,permuteList))\n #print(primeListCheck)\n if True in primeListCheck :\n print (permuteList[primeListCheck.index(True)])\n flag = False\n break\n #print (loopCount)\n digitCount -= 1\n#print (digitTest)\nprint (\"loopCount=\",loopCount)\n\n\n# Completed\n\n\"\"\"\n\nWe shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.\n\nWhat is the largest n-digit pandigital prime that exists?\n\n\"\"\"", "sub_path": "Project-Euler-master/#41 Pandigital Prime.py", "file_name": "#41 Pandigital Prime.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "itertools.permutations", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "50215927", "text": "from arcade.gui.widgets import UIDummy, UIBorder, UIWrapper\n\n\ndef test_wrap_calculates_padding():\n # GIVEN\n child = UIDummy()\n\n # WHEN\n widget = UIWrapper(child=child, padding=(1, 2, 3, 4))\n\n # THEN\n assert widget.rect == (-4, -3, 106, 104)\n assert child.rect == (0, 0, 100, 100)\n", "sub_path": "tests/test_gui/test_wrapper_basics.py", "file_name": "test_wrapper_basics.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "arcade.gui.widgets.UIDummy", "line_number": 6, "usage_type": "call"}, {"api_name": "arcade.gui.widgets.UIWrapper", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "388200185", "text": "import PySimpleGUI as gui\nfrom os import error, path\n\ndef errorInterface(message, exc):\n gui.popup_error(message, exc, no_titlebar=True)\n\ndef progressBar(list_len):\n layout = [\n [gui.ProgressBar(max_value=list_len, orientation='h', size=(100, 50), key='progbar')],\n [gui.Cancel('Cancelar')]\n ]\n window = gui.Window('Redimensionar', layout, size=(200, 100), icon=False, resizable=False)\n event, values = window.read()\n\n\ndef mainInterface():\n user_input = {}\n\n layout = [\n [gui.Text(\"Informe novo valor de largura (altura será proporcionalmente redimensionada)\")],\n [gui.Input(tooltip='Ex: 650.00', key='width', )],\n [gui.Text()],\n [gui.Text('Informe pasta onde estão os arquivos')],\n [gui.Input(key='folder', tooltip='Ex: C:\\\\Users\\\\hcped\\\\Downloads\\\\Imagens'), gui.FolderBrowse('Procurar', )],\n [gui.OK('Redimensionar', button_color='green'), gui.Cancel('Cancelar', button_color='red')]\n ]\n\n window = gui.Window('Redimensionar', layout, size=(480, 180), resizable=True, icon=False)\n event, user_input = window.read()\n\n user_input.pop(\"Procurar\")\n\n if event == 
'Redimensionar':\n try:\n user_input['width'] = float(user_input['width'])\n except Exception as exc:\n errorInterface(message='Largura informada está no formato incorreto', exc=exc)\n return\n\n if not path.exists(user_input['folder']) and not path.isdir(user_input['folder']):\n errorInterface(message='Pasta selecionada é inválido ou inacessivel', exc=error.filename)\n return\n\n return user_input\n\nif __name__ == '__main__':\n raise('Rodar setup.py')", "sub_path": "interface.py", "file_name": "interface.py", "file_ext": "py", "file_size_in_byte": 1687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PySimpleGUI.popup_error", "line_number": 5, "usage_type": "call"}, {"api_name": "PySimpleGUI.ProgressBar", "line_number": 9, "usage_type": "call"}, {"api_name": "PySimpleGUI.Cancel", "line_number": 10, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 12, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 20, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 21, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 22, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 23, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 24, "usage_type": "call"}, {"api_name": "PySimpleGUI.FolderBrowse", "line_number": 24, "usage_type": "call"}, {"api_name": "PySimpleGUI.OK", "line_number": 25, "usage_type": "call"}, {"api_name": "PySimpleGUI.Cancel", "line_number": 25, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.error.filename", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.error", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "41814980", "text": "# import pandas\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport sys\nimport os\nimport random\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import normalize\nfrom sae_dataloader import dataloader\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nfrom timenet import TimeNet\n\n# PARAMS:\nCUDA = torch.cuda.is_available()\nEPOCHS = 100\nBATCH_STEP_SIZE = 64\n\nprint('preparing data')\ntrainloader, validloader = dataloader(colab=False, batch_size=BATCH_STEP_SIZE)\n\nnet = TimeNet()\nnet.double()\n# net.load_state_dict(torch.load('weights/model-sae3-checkpoint.pt'))\nif CUDA:\n print('using cuda')\n net.cuda()\n\ncriterion = nn.MSELoss()\nlearning_rate = 0.006\noptimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)\n\nbest_train_loss = 1000\nbest_val_loss = 1000\nlossArr = []\n\ntrain_writer = SummaryWriter(log_dir='logs-tensorboard/train')\nval_writer = SummaryWriter(log_dir='logs-tensorboard/val')\nfor i in range(EPOCHS):\n train_loss_value = 0\n val_loss_value = 0\n train_loss_epoch = np.array([])\n val_loss_epoch = np.array([])\n\n print(\"Epoch {}/{}\".format(i, EPOCHS))\n print('-' * 10)\n # training\n for inputs in tqdm(trainloader):\n input_ = Variable(torch.DoubleTensor(inputs), requires_grad=False)\n if CUDA: \n input_ = input_.cuda()\n input_reversed = input_.data.cpu().numpy() if CUDA else input_.data.numpy()\n 
input_reversed = np.flip(input_reversed, axis=1).copy()\n input_reversed = Variable(torch.from_numpy(input_reversed).double(), requires_grad=False)\n if CUDA: \n input_reversed = input_reversed.cuda()\n\n optimizer.zero_grad()\n predicted, encoded = net(input_, input_reversed)\n loss = criterion(predicted, input_reversed)\n \n train_loss_value = loss.data.cpu().numpy() * input_.size(0) if CUDA else loss.data.numpy() * input_.size(0)\n # lossArr = np.append(lossArr, [train_loss_value], axis=0)\n train_loss_epoch = np.append(train_loss_epoch, [train_loss_value], axis=0)\n\n loss.backward()\n optimizer.step()\n train_loss_epoch = np.average(train_loss_epoch)\n print('\\ttrain ----> loss: %s' % train_loss_epoch)\n train_writer.add_scalar('loss', train_loss_epoch, i)\n # print('current_loss: %s, best_train_loss: %s' % (train_loss_epoch, best_train_loss))\n \n # validation\n for inputs in tqdm(validloader):\n input_ = Variable(torch.DoubleTensor(inputs), requires_grad=False)\n if CUDA: \n input_ = input_.cuda()\n input_reversed = input_.data.cpu().numpy() if CUDA else input_.data.numpy()\n input_reversed = np.flip(input_reversed, axis=1).copy()\n input_reversed = Variable(torch.from_numpy(input_reversed).double(), requires_grad=False)\n if CUDA: \n input_reversed = input_reversed.cuda()\n\n predicted, encoded = net(input_, input_reversed)\n loss = criterion(predicted, input_reversed)\n\n val_loss_value = loss.data.cpu().numpy() * input_.size(0) if CUDA else loss.data.numpy() * input_.size(0)\n val_loss_epoch = np.append(val_loss_epoch, [val_loss_value], axis=0)\n val_loss_epoch= np.average(val_loss_epoch)\n print('\\tval ----> loss: %s' % val_loss_epoch)\n val_writer.add_scalar('loss', val_loss_epoch, i)\n\n if (val_loss_epoch < best_val_loss):\n best_val_loss = val_loss_epoch\n print('best_val_loss: %s' % best_val_loss)\n if CUDA:\n torch.save(net.cpu().state_dict(), 'weights/timenet.pt')\n print('save weights')\n net.cuda()\n else:\n print('save weights')\n torch.save(net.state_dict(), 'weights/timenet.pt')\n # torch.save(net.state_dict(), 'weights/model-sae3-checkpoint.pt')\n # np.savetxt('loss.csv', lossArr, delimiter=',')\n\n# if CUDA:\n# net.cpu()\n# torch.save(net.state_dict(), 'weights/model-sae3.pt')\n\n\n# if __name__ == '__main__':\n # print('lol')", "sub_path": "sae.py", "file_name": "sae.py", "file_ext": "py", "file_size_in_byte": 4028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sae_dataloader.dataloader", "line_number": 25, "usage_type": "call"}, {"api_name": "timenet.TimeNet", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", 
"line_number": 54, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 73, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.DoubleTensor", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "31649373", "text": "from rest_framework import generics, serializers\n\nfrom data_refinery_common.models import Organism\n\n\nclass OrganismSerializer(serializers.ModelSerializer):\n class Meta:\n model = Organism\n fields = (\n \"name\",\n \"taxonomy_id\",\n )\n\n\nclass OrganismListView(generics.ListAPIView):\n \"\"\"\n Paginated list of all the available organisms.\n \"\"\"\n\n queryset = Organism.objects.all()\n serializer_class = OrganismSerializer\n\n\nclass OrganismDetailView(generics.RetrieveAPIView):\n \"\"\"\n Retrieves an organism by its name.\n \"\"\"\n\n lookup_field = \"name\"\n queryset = Organism.objects.all()\n serializer_class = OrganismSerializer\n", "sub_path": "api/data_refinery_api/views/organism.py", "file_name": "organism.py", "file_ext": "py", "file_size_in_byte": 685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 15, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Organism.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 24, "usage_type": "name"}, {"api_name": "data_refinery_common.models.Organism.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "data_refinery_common.models.Organism.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.Organism", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "105672096", "text": "from datetime import datetime\nfrom itertools import ifilter\nimport math\nfrom juggernaut import 
Juggernaut\n\nfrom flask import Flask, request, url_for, redirect, g, session, flash, \\\n abort, render_template\nfrom flask.signals import Namespace\n\nfrom flaskext.csrf import csrf\n\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom sqlalchemy import event as sqla_event\nfrom sqlalchemy.sql.expression import func as sqla_func\nfrom sqlalchemy.orm.interfaces import SessionExtension, EXT_CONTINUE\n\n# App definiton\n\napp = Flask(__name__)\napp.config.from_pyfile('config.cfg')\njug = Juggernaut()\ncsrf(app)\n\n# register additional template commands\n\ndef url_for_other_page(page):\n args = request.view_args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\napp.jinja_env.globals['url_for_other_page'] = url_for_other_page\n\n# create custom signals\nsignals = Namespace()\nbefore_flush = signals.signal('models-before-flush')\n\n\n# Add flush signalling to session\nclass FlushSignalExtension(SessionExtension):\n def before_flush(self, session, flush_context, instances):\n before_flush.send(session.app, session=session, instances=instances)\n return EXT_CONTINUE\n\n\ndb = SQLAlchemy(app, session_extensions=[FlushSignalExtension()])\n\n# Data Model\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n code = db.Column(db.String(6))\n name = db.Column(db.String(150))\n lug = db.Column(db.String(100))\n balance = db.Column(db.Integer, nullable=False, default=0)\n allowed_cashier = db.Column(db.Boolean, default=False)\n blocked = db.Column(db.Boolean, default=False, nullable=False)\n\n @staticmethod\n def get_by_code(code):\n user = User.query.filter(User.code==code).first_or_404()\n if user.blocked:\n abort(403)\n return user\n\n def update_balance(self):\n self.balance = sum([bill.accumulated_price for bill in self.bills])\n\n\nclass BillEntry(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n bill_id = db.Column(db.Integer, db.ForeignKey('bill.id'))\n bill = db.relationship('Bill', backref='entries')\n name = db.Column(db.String(150))\n price = db.Column(db.Integer, nullable=False, default=0)\n\n\nclass Bill(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n user = db.relationship(User, uselist=False, backref=\"bills\")\n accumulated_items = db.Column(db.Integer, nullable=False, default=0)\n accumulated_price = db.Column(db.Integer, nullable=False, default=0)\n created = db.Column(db.DateTime, default=sqla_func.current_timestamp())\n\n def update_accumulated(self):\n self.accumulated_price = sum([entry.price for entry in self.entries])\n self.accumulated_items = len(self.entries)\n\n\nclass Voucher(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n code = db.Column(db.String(8))\n redeemed = db.Column(db.Boolean)\n value = db.Column(db.Integer)\n\n\nclass ShopItem(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(150))\n price = db.Column(db.Integer, nullable=False, default=0)\n image = db.Column(db.String(200))\n category = db.Column(db.Integer, nullable=False)\n\n\ndef update_sums_and_balances(app, session, instances):\n \"\"\"Handler for updating bill sum and user balance\"\"\"\n\n # First update all bill sums\n for session_changed in [session.new, session.deleted, session.dirty]:\n predicate = lambda s: isinstance(s, BillEntry)\n for entry in ifilter(predicate, session_changed):\n entry.bill.update_accumulated()\n # Then update all users balances\n for session_changed in [session.new, session.deleted, session.dirty]:\n 
predicate = lambda s: isinstance(s, Bill)\n        for entry in ifilter(predicate, session_changed):\n            entry.user.update_balance()\n\nbefore_flush.connect(update_sums_and_balances)\n\n# cashier and user notification helpers\ndef send_updated_balance_notification(user):\n    \"\"\"Notifies clients about updated balance\"\"\"\n\n    data = {'balance': \"%6.2f\" % (user.balance/100),\n            'updated_on': \"Live aktualisiert: %s\" % datetime.now().strftime('%H:%M @ %d.%m.%Y')}\n    jug.publish('updated-balance:%s' % user.code, data)\n\ndef send_new_customer_notification(cashier_code, new_customer_code):\n    \"\"\"Notifies cashier clients about new user scan\"\"\"\n\n    data = {'code': new_customer_code}\n    jug.publish('new-customer:%s' % cashier_code, data)\n\ndef send_redeem_voucher_notification(cashier_code, voucher_code):\n    \"\"\"Notifies cashier about newly scanned voucher\"\"\"\n\n    data = {'code': voucher_code}\n    jug.publish('scanned-voucher:%s' % cashier_code, data)\n\n# view functions\n@app.before_request\ndef set_request_environment():\n    g.cashier = None\n    if 'cashier' in session:\n        g.cashier = User.query.get(session['cashier'])\n    if 'scan_device' not in session:\n        session['scan_device'] = False\n\n@app.errorhandler(403)\ndef access_denied(e):\n    return render_template('error403.html'), 403\n\n@app.errorhandler(404)\ndef access_denied(e):\n    return render_template('error404.html'), 404\n\n@app.route('/<code>')\ndef usercode(code):\n    \"\"\"Request from User to show balance or from cashier to change to User\"\"\"\n\n    if g.cashier:\n        if code == g.cashier.code:\n            return redirect(url_for('devices', disable_navigation=True, ownercode=g.cashier.code))\n        if session['scan_device']:\n            send_new_customer_notification(g.cashier.code, code)\n            user = User.get_by_code(code)\n            return render_template('new_customer_notification.html', disable_navigation=True, user=user)\n        return redirect(url_for('new_bill', code=code))\n\n    return redirect(url_for('show_balance', code=code))\n\n@app.route('/<code>/balance/', defaults={'page': 1})\n@app.route('/<code>/balance/page/<int:page>')\ndef show_balance(code, page):\n    user = User.get_by_code(code)\n    pagination = Bill.query.filter_by(user=user).paginate(page)\n    flens = int(math.floor(user.balance/70))\n    return render_template('balance.html', user=user, pagination=pagination, flens=flens)\n\n@app.route('/<code>/bill/<int:bill_id>')\ndef show_bill(code, bill_id):\n    user = User.get_by_code(code)\n    bill = Bill.query.filter_by(user=user).filter_by(id=bill_id).first_or_404()\n    flens = int(math.floor(bill.user.balance/70))\n    return render_template('show_bill.html', bill=bill, flens=flens)\n\n@app.route('/<code>/new_bill', methods=['GET', 'POST'])\ndef new_bill(code):\n    user = User.get_by_code(code)\n    if request.method == \"POST\":\n        try:\n            bill_ids = request.form['bill_ids'].split(\",\")\n            bill = Bill(user=user)\n            db.session.add(bill)\n            for bill_id in bill_ids:\n                item = ShopItem.query.get(int(bill_id))\n                billentry = BillEntry(name=item.name, price=-item.price, bill=bill)\n                db.session.add(billentry)\n            db.session.commit()\n            return redirect(url_for('show_balance', code=user.code))\n        except ValueError:\n            flash(\"Error during bill creation, only provide integers!\")\n    items = ShopItem.query.order_by(ShopItem.category).all()\n    return render_template('new_bill.html', user=user, items=items)\n\n@app.route('/<code>/bill/<int:bill_id>/cancel_item/<int:item_id>', methods=['GET', 'POST'])\ndef cancel_item(code,bill_id,item_id):\n    if not g.cashier:\n        abort(403)\n    user = User.get_by_code(code)\n    bill = Bill.query.get(bill_id)\n    item = BillEntry.query.get(item_id)\n    if bill != item.bill:\n
        abort(403)\n    if request.method == \"POST\":\n        db.session.delete(item)\n        flash(\"Removed item %s from bill %i\" % (item.name, bill.id))\n        db.session.commit()\n        if len(bill.entries) == 0:\n            db.session.delete(bill)\n            user.update_balance()\n            flash(\"Removed whole bill %i\" % (bill.id))\n            db.session.commit()\n            return redirect(url_for('show_balance',code=user.code))\n        return redirect(url_for('show_bill', code=user.code, bill_id=bill_id))\n    return render_template('cancel_item.html', user=user, bill=bill, item=item)\n\n@app.route('/<code>/voucher', methods=['GET', 'POST'])\ndef redeem_voucher(code):\n    if not g.cashier:\n        abort(403)\n    user = User.get_by_code(code)\n    if request.method == \"POST\":\n        voucher = Voucher.query.filter_by(code=request.form['vouchercode']).first()\n        if voucher:\n            if voucher.redeemed:\n                flash(\"Used voucher already\")\n            else:\n                bill = Bill(user=user)\n                billentry = BillEntry(bill=bill)\n                billentry.name = \"Redeemed voucher (%s, code:%s)\" % (g.cashier.name, voucher.code)\n                billentry.price = voucher.value\n                voucher.redeemed = True\n                db.session.add(bill)\n                db.session.add(billentry)\n                db.session.commit()\n                return render_template(\"voucher_redeemed.html\", voucher=voucher, user=user)\n        else:\n            flash(\"No such voucher\")\n    return render_template(\"redeem_voucher.html\", user=user)\n\n@app.route('/<code>/quick_payment', methods=['GET', 'POST'])\ndef quick_payment(code):\n    if not g.cashier:\n        return redirect(url_for('show_balance', code=code))\n    user = User.get_by_code(code)\n    if request.method == \"POST\":\n        if 'amount' in request.form:\n            value = int(request.form['amount']) or 0\n            bill = Bill(user=user)\n            billentry = BillEntry(name=\"Einzahlung\", price=value, bill=bill)\n            db.session.add(billentry)\n            db.session.add(bill)\n            db.session.commit()\n            flash(\"Konto aufgeladen mit %.2f EUR\" % (value/100))\n        return redirect(url_for('new_bill', code=code))\n    return render_template('quick_payment.html', user=user)\n\n@app.route('/cashier/devices', methods=['GET', 'POST'])\ndef devices():\n    if not g.cashier:\n        abort(403)\n    if request.method == \"POST\":\n        if not session['scan_device']:\n            session['scan_device'] = True\n        else:\n            session['scan_device'] = False\n        flash(\"Scan device status updated\")\n    is_scan_device = session['scan_device']\n    return render_template(\"devices.html\", disable_navigation=is_scan_device, is_scan_device=is_scan_device)\n\n@app.route('/cashier/nextcustomer')\ndef nextcustomer():\n    return render_template('nextcustomer.html')\n\n@app.route('/cashier/signin', methods=['POST', ])\ndef signin():\n    user = User.query.filter_by(code=request.form['code']).first_or_404()\n    if request.method == \"POST\" and request.form['password'] == app.config['CASHIER_PASSWORD']:\n        session['cashier'] = user.id\n    else:\n        flash(\"Login failed\")\n    return redirect(url_for('show_balance', code=user.code))\n\n@app.route('/cashier/signout')\ndef signout():\n    code = g.cashier.code\n    session.clear()\n    return redirect(url_for('show_balance', code=code))\n\n@app.route('/graph/all')\ndef graphing():\n    sellings = {}\n\n    for i in range(0, 72):\n        sellings[i]=0\n\n    entries = BillEntry.query.all()\n\n    for entry in entries:\n        key = entry.bill.created.hour + (entry.bill.created.day-17) * 24\n\n        if key in sellings:\n            old = sellings[key]\n            sellings[key] = old + 1\n\n    return render_template(\"graph.html\", sellings=sellings)\n\n\n@app.route('/voucher/<code>')\ndef vouchercode(code):\n    voucher = Voucher.query.filter_by(code=code).first()\n    if voucher:\n        voucher.valid = True\n    else:\n        voucher = Voucher()\n        voucher.code = code\n        
voucher.value = 0.0\n voucher.valid = False\n\n if voucher.valid and g.cashier and session['scan_device']:\n send_redeem_voucher_notification(g.cashier.code, code)\n\n return render_template(\"vouchercode.html\", voucher=voucher)\n", "sub_path": "lugcampkasse.py", "file_name": "lugcampkasse.py", "file_ext": "py", "file_size_in_byte": 12034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "juggernaut.Juggernaut", "line_number": 21, "usage_type": "call"}, {"api_name": "flaskext.csrf.csrf", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.view_args.copy", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.view_args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.endpoint", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.signals.Namespace", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.interfaces.SessionExtension", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.session.app", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.interfaces.EXT_CONTINUE", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.func.current_timestamp", "line_number": 81, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.func", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.session.new", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.session.deleted", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.session.dirty", "line_number": 107, "usage_type": "attribute"}, {"api_name": "itertools.ifilter", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.session.new", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.session.deleted", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.session.dirty", "line_number": 112, "usage_type": "attribute"}, {"api_name": "itertools.ifilter", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.g.cashier", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.g.cashier", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 150, 
"usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 160, "usage_type": "name"}, {"api_name": "flask.g.cashier", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.g.cashier", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 166, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 169, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 176, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 177, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 207, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 207, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 216, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 221, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 229, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 229, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 230, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": 
"flask.request.form", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 240, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 240, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 253, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 258, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 258, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 265, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 270, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 272, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 272, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 273, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 274, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 276, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 277, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 283, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 287, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 287, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 288, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 288, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 289, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 296, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 296, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 297, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 297, "usage_type": "name"}, 
{"api_name": "flask.redirect", "line_number": 298, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 298, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 316, "usage_type": "call"}, {"api_name": "flask.g.cashier", "line_number": 330, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 330, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 330, "usage_type": "name"}, {"api_name": "flask.g.cashier", "line_number": 331, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 331, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 333, "usage_type": "call"}]} +{"seq_id": "643309928", "text": "import pandas as pd\nimport tasks\nimport sys\n\nfrom . import base\nfrom datetime import datetime, timedelta\nfrom typing import TypeVar, Generic\nfrom garfield import AdjustReporter, AdjustReportPeriod\nfrom math import ceil\n\nT = TypeVar(tasks.FetchAdjustReportTask)\nclass AdjustReportFetcher(Generic[T], base.ReportFetcher[T]):\n pass\n\nclass AdjustDeliverablesReportFetcher(AdjustReportFetcher[tasks.FetchAdjustDeliverablesReportTask]):\n def fetch(self):\n reporter = AdjustReporter(api=self.task.api)\n\n date = self.task.report_start_date\n report = None\n while date <= self.task.report_end_date:\n date_report = reporter.fetch_deliverables_report(\n start_date=date,\n end_date=date\n )\n date_report['date'] = date\n if report is None:\n report = date_report\n else:\n report = report.append(date_report)\n date = date + timedelta(days=1)\n\n if report is not None:\n report.reset_index(drop=True, inplace=True)\n\n self.task.report = report\n\nclass AdjustEventsReportFetcher(AdjustReportFetcher[tasks.FetchAdjustEventsReportTask]):\n def fetch(self):\n reporter = AdjustReporter(api=self.task.api)\n\n date = self.task.report_start_date\n report = None\n while date <= self.task.report_end_date:\n date_report = reporter.fetch_events_report(\n start_date=date,\n end_date=date\n )\n date_report['date'] = date\n if report is None:\n report = date_report\n else:\n report = report.append(date_report)\n date = date + timedelta(days=1)\n\n if report is not None:\n report.reset_index(drop=True, inplace=True)\n\n self.task.report = report\n\nclass AdjustCohortsMeasuresReportFetcher(AdjustReportFetcher[tasks.FetchAdjustCohortsMeasuresReportTask]):\n def fetch(self):\n reporter = AdjustReporter(api=self.task.api)\n\n cohort = self.task.first_cohort_date\n if self.task.period.max_period is not None:\n first_relevant_date = self.task.report_start_date - timedelta(days=self.task.period.days * self.task.period.max_period)\n irrelevant_cohorts = ceil((first_relevant_date - cohort).days / self.task.cohort_days)\n if irrelevant_cohorts > 0:\n cohort += timedelta(days=self.task.cohort_days * irrelevant_cohorts)\n\n report = None\n cohort_number = 0\n cohort_interval = timedelta(days=self.task.cohort_days - 1)\n while cohort <= self.task.report_end_date:\n cohort_number += 1\n if not cohort_number % 10:\n print(f'Fetching cohort number {cohort_number}: {cohort.strftime(\"%Y-%m-%d\")}')\n sys.stdout.flush()\n cohort_report = reporter.fetch_cohort_report(\n cohort_start=cohort,\n cohort_end=cohort + cohort_interval,\n period=self.task.period,\n )\n cohort_report['cohort'] = cohort\n\n # filter out rows that are outside the task date range\n filter_report = cohort_report[['cohort', 'period']].copy()\n filter_report['effective_date'] = filter_report.cohort + filter_report.period.apply(lambda p: timedelta(days=p * 
self.task.period.days))\n filter_report.drop(filter_report.index[filter_report.effective_date < self.task.report_start_date], inplace=True)\n filter_report.drop(filter_report.index[filter_report.effective_date > self.task.report_end_date], inplace=True)\n cohort_report = cohort_report.loc[filter_report.index]\n \n if report is None:\n report = cohort_report\n else:\n report = report.append(cohort_report)\n cohort = cohort + timedelta(days=self.task.cohort_days)\n\n if report is not None:\n report.reset_index(drop=True, inplace=True)\n\n self.task.report = report\n", "sub_path": "fetching/fetch_adjust.py", "file_name": "fetch_adjust.py", "file_ext": "py", "file_size_in_byte": 3647, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.TypeVar", "line_number": 11, "usage_type": "call"}, {"api_name": "tasks.FetchAdjustReportTask", "line_number": 11, "usage_type": "attribute"}, {"api_name": "typing.Generic", "line_number": 12, "usage_type": "name"}, {"api_name": "tasks.FetchAdjustDeliverablesReportTask", "line_number": 15, "usage_type": "attribute"}, {"api_name": "garfield.AdjustReporter", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "tasks.FetchAdjustEventsReportTask", "line_number": 38, "usage_type": "attribute"}, {"api_name": "garfield.AdjustReporter", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 54, "usage_type": "call"}, {"api_name": "tasks.FetchAdjustCohortsMeasuresReportTask", "line_number": 61, "usage_type": "attribute"}, {"api_name": "garfield.AdjustReporter", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 67, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 79, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "375759352", "text": "import sqlite3\n\n\ndef connect_to_db(db_name=\"rpg_db.sqlite3\"):\n return sqlite3.connect(db_name)\n\n\ndef execute_query(cursor, query):\n\n # this is takig the given cursor, and executing the given query\n # this is the total results of the query in another table.\n cursor.execute(query)\n\n # this is fetching and return the results.\n return cursor.fetchall()\n\n\n# this is a stament,but not results\nGET_CHARACTERS = \"SELECT * FROM charactercreator_character;\"\n\nGET_CHARACTERS2 = \"\"\"\n SELECT * \n FROM charactercreator_character;\n \"\"\"\n\n\nif __name__ == \"__main__\":\n\n # connect to database\n conn = connect_to_db()\n\n # make a cursor (something that iterates over DB)\n curs = conn.cursor()\n\n # Write query (inside python this will be a string)\n # Execute query\n # Fetch results\n results = execute_query(curs, GET_CHARACTERS)\n\n # display results\n print(results)\n", "sub_path": "module1-introduction-to-sql/rpg_db_example.py", "file_name": "rpg_db_example.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": 
"sqlite3.connect", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "189141542", "text": "from collections import defaultdict\nfrom train_svm import PREDICT_ONE, CREATE_FEATURES\ntest_f = '../../data/titles-en-test.word'\nmodel_f = 'model_file.txt'\n\ndef PREDICT_ALL(model_f, input_f):\n w = defaultdict(int)\n with open(model_f, 'r') as f1, open(input_f, 'r') as f2:\n for line in f1:\n words = line.strip().split()\n w[words[0]] = float(words[1])\n for x in f2:\n phi = dict()\n for k, v in CREATE_FEATURES(x).items():\n phi[k] = v\n n_y = PREDICT_ONE(w, phi)\n yield('{}\\t{}'. format(n_y, x))\n\nif __name__ == '__main__':\n with open('my_answer.labeled', 'w') as f:\n for line in PREDICT_ALL(model_f, test_f):\n f.write(line)\n", "sub_path": "naruhisa/tutorial06/test_svm.py", "file_name": "test_svm.py", "file_ext": "py", "file_size_in_byte": 742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "train_svm.CREATE_FEATURES", "line_number": 14, "usage_type": "call"}, {"api_name": "train_svm.PREDICT_ONE", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "481713439", "text": "import shutil\nimport requests\n\n\nclass GoogleMapsStaticAPIRequest(object):\n def __init__(self, maptype, coordinate, zoom, size):\n self.base_url = \"https://maps.googleapis.com/maps/api/staticmap\"\n self.key = \"\"\n self.maptype = maptype\n self.coordinate = coordinate\n self.zoom = zoom\n self.size = size\n\n def save_image(self, saved_image_path):\n static_api_request = requests.get(\n self.base_url + \"?\" +\n \"maptype\" + \"=\" + self.maptype +\n \"&\" + \"center\" + \"=\" +\n str(self.coordinate[0]) + \",\" +\n str(self.coordinate[1]) +\n \"&\" + \"zoom\" + \"=\" + str(self.zoom) + \"&\" + \"size\" + \"=\" +\n str(self.size[0]) + \"x\" + str(self.size[1]) +\n \"&\" + \"key\" + \"=\" + self.key,\n stream=True)\n with open(saved_image_path, 'wb') as image_file:\n static_api_request.raw.decode_content = True\n shutil.copyfileobj(static_api_request.raw, image_file)", "sub_path": "src/google_maps_static_api_request.py", "file_name": "google_maps_static_api_request.py", "file_ext": "py", "file_size_in_byte": 1013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "345856233", "text": "import numpy as np\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.callbacks import TensorBoard\r\nimport h5py\r\nfrom keras.utils import to_categorical\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom keras.models import Model\r\n\r\n# load data\r\nx_train = np.load(\"E:\\\\thesis_work\\\\RAF-DB\\\\np_data\\\\x_train_RAF-DB.npy\")\r\ny_train = np.load(\"E:\\\\thesis_work\\\\RAF-DB\\\\np_data\\\\y_train_RAF-DB.npy\")\r\nx_val = np.load(\"E:\\\\thesis_work\\\\RAF-DB\\\\np_data\\\\x_test_RAF-DB.npy\")\r\ny_val = np.load(\"E:\\\\thesis_work\\\\RAF-DB\\\\np_data\\\\y_test_RAF-DB.npy\")\r\n\r\n#covert label 1-7 to 0-6\r\ny_train = y_train - 1\r\ny_val = y_val -1\r\n\r\n#onehot encoding\r\ny_train_onehot = to_categorical(y_train)\r\ny_val_onehot = to_categorical(y_val)\r\n\r\n# data normalization\r\nx_train_n3 = (x_train - x_train.mean(axis=(0,1,2), keepdims=True)) / 
x_train.std(axis=(0,1,2), keepdims=True)\r\nx_val_n3 = (x_val - x_train.mean(axis=(0,1,2), keepdims=True)) / x_train.std(axis=(0,1,2), keepdims=True)\r\n\r\ndatagen = ImageDataGenerator()\r\ndatagen.fit(x_train_n3)\r\n\r\n# VGG16 with imagenet weights\r\nbase_model = VGG16(weights = \"imagenet\", include_top=False, input_shape =(100, 100, 3))\r\nbase_model.load_weights(\"C:\\\\Users\\\\student\\\\.keras\\\\models\\\\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5\")\r\nx = base_model.output\r\nx = Flatten()(x)\r\nx = Dropout(0.3)(x)\r\npredictions = Dense(6, activation=\"softmax\")(x)\r\nmodel = Model(inputs = base_model.input, outputs = predictions)\r\n\r\n# freeze some layers\r\nfor layer in model.layers[10:18]:\r\n layer.trainable = False\r\n\r\n#tensorboard callback\r\ntensorboard = TensorBoard(log_dir=\"E:\\\\thesis_results\\\\tranfer_learning\\\\raf_db\\\\logs\\\\vgg_with_weights_small\")\r\n\r\nmodel.compile(optimizer='adam',\r\n loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# fits the model on batches with real-time data augmentation:\r\nmodel.fit_generator(datagen.flow(x_train_n3, y_train_onehot, batch_size=100),\r\n steps_per_epoch=len(x_train_n3) / 100, epochs=20, verbose=2,\r\n validation_data=(x_val_n3, y_val_onehot), callbacks=[tensorboard])\r\n\r\nmodel.save(\"E:\\\\thesis_results\\\\tranfer_learning\\\\raf_db\\\\models\\\\vgg_with_weights_small.h5\")", "sub_path": "code_for_git/tranfer_learning/raf_db_tranfer.py", "file_name": "raf_db_tranfer.py", "file_ext": "py", "file_size_in_byte": 2229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.load", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.applications.vgg16.VGG16", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "167942949", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport tensorflow as tf\r\nimport random\r\nfrom tensorflow.python.keras.utils import data_utils\r\nfrom Config import Config\r\nfrom DatabaseGenerator import DataGenerator\r\nimport imageio\r\nimport pathlib\r\n\r\ngpus = tf.config.list_physical_devices('GPU')\r\nif gpus:\r\n try:\r\n # Currently, memory growth needs to be the same across GPUs\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\r\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\r\n except RuntimeError as e:\r\n # Memory growth must be set before GPUs have been initialized\r\n print(e)\r\n\r\nclass TrainOnCustomObject(Config):\r\n def __init__(self):\r\n 
\r\n db_gen = DataGenerator(\r\n self.NUM_BATCH_IN_EPOCH * self.BATCH_SIZE,\r\n self.IMG_SIZE, \r\n self.SAMPLE_DIR.encode(\"utf-8\"),\r\n self.FG_DIR.encode(\"utf-8\"), \r\n self.MASK_DIR.encode(\"utf-8\"), \r\n self.BG_DIR.encode(\"utf-8\")\r\n )\r\n \"\"\"\r\n db_gen = tf.data.Dataset.range(2).interleave(\r\n lambda _: db_gen,\r\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\r\n .batch(self.BATCH_SIZE, drop_remainder=True)\r\n .cache() \r\n .prefetch(tf.data.experimental.AUTOTUNE)\r\n .unbatch()\r\n \"\"\"\r\n \r\n db_gen = tf.data.Dataset.range(2).interleave(lambda _: db_gen, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(self.BATCH_SIZE, drop_remainder=True).cache().prefetch(tf.data.experimental.AUTOTUNE).unbatch()\r\n \r\n \r\n \r\n self.train_dataset = db_gen.batch(self.BATCH_SIZE)\r\n self.validation_dataset = db_gen.batch(self.BATCH_SIZE)\r\n self.test_dataset = db_gen.batch(self.BATCH_SIZE) \r\n \r\n preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input\r\n\r\n IMG_SHAPE = self.IMG_SIZE + (3,)\r\n self.base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\r\n include_top=False,\r\n weights='imagenet')\r\n \r\n AUTOTUNE = tf.data.experimental.AUTOTUNE\r\n self.train_dataset = self.train_dataset.prefetch(buffer_size=self.BATCH_SIZE)\r\n\r\n image_batch, label_batch = next(iter(self.train_dataset))\r\n feature_batch = self.base_model(image_batch)\r\n self.base_model.trainable = False\r\n \r\n global_average_layer = tf.keras.layers.GlobalAveragePooling2D()\r\n feature_batch_average = global_average_layer(feature_batch)\r\n \r\n prediction_layer = tf.keras.layers.Dense(1, activation = \"tanh\")\r\n prediction_batch = prediction_layer(feature_batch_average)\r\n\r\n \r\n inputs = tf.keras.Input(shape=IMG_SHAPE)\r\n \r\n x = preprocess_input(inputs)\r\n x = self.base_model(x, training=False)\r\n x = global_average_layer(x)\r\n x = tf.keras.layers.Dropout(0.2)(x)\r\n outputs = prediction_layer(x)\r\n\r\n self.model = tf.keras.Model(inputs, outputs)\r\n self.model.compile(optimizer=tf.keras.optimizers.Adam(lr=self.LR),\r\n loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\r\n metrics=['accuracy'])\r\n\r\n return \r\n\r\n def get_model(self):\r\n return self.model\r\n #from Config import Config\r\n \r\n def create_folder(self, path):\r\n if not os.path.exists(path):\r\n print(\"Creating of new directory: \" + path )\r\n os.makedirs(path)\r\n \r\n def train_model(self, save_dir = None):\r\n \r\n history = self.model.fit(\r\n self.train_dataset,\r\n epochs=self.INITIAL_EPOCHS,\r\n validation_data = self.validation_dataset)\r\n\r\n acc = history.history['accuracy']\r\n val_acc = history.history['val_accuracy']\r\n\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n\r\n # fine tuning \r\n\r\n self.base_model.trainable = True\r\n for layer in self.base_model.layers[:self.FINE_TUNE_AT]:\r\n layer.trainable = False\r\n\r\n self.model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),\r\n optimizer = tf.keras.optimizers.RMSprop(lr=self.FINE_TUNE_LR),\r\n metrics=['accuracy'])\r\n \r\n\r\n fine_tune_epochs = self.INITIAL_EPOCHS + self.FINE_TUNE_EPOCHS\r\n\r\n total_epochs = self.INITIAL_EPOCHS + self.FINE_TUNE_EPOCHS\r\n\r\n history_fine = self.model.fit(self.train_dataset,\r\n initial_epoch=history.epoch[-1],\r\n epochs=fine_tune_epochs,\r\n validation_data=self.validation_dataset)\r\n\r\n acc += history_fine.history['accuracy']\r\n val_acc += history_fine.history['val_accuracy']\r\n\r\n loss += 
history_fine.history['loss']\r\n val_loss += history_fine.history['val_loss']\r\n\r\n if save_dir:\r\n\r\n self.create_folder(save_dir)\r\n\r\n plt.figure(figsize=(8, 8))\r\n plt.subplot(2, 1, 1)\r\n plt.plot(acc, label='Training Accuracy')\r\n plt.plot(val_acc, label='Validation Accuracy')\r\n plt.ylim([0.3, 1])\r\n plt.plot([self.INITIAL_EPOCHS-1,self.INITIAL_EPOCHS-1],\r\n plt.ylim(), label='Start Fine Tuning')\r\n plt.legend(loc='lower right')\r\n plt.title('Training and Validation Accuracy')\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.plot(loss, label='Training Loss')\r\n plt.plot(val_loss, label='Validation Loss')\r\n plt.ylim([0, 1.0])\r\n plt.plot([self.INITIAL_EPOCHS-1,self.INITIAL_EPOCHS-1],\r\n plt.ylim(), label='Start Fine Tuning')\r\n plt.legend(loc='upper right')\r\n plt.title('Training and Validation Loss')\r\n plt.xlabel('epoch')\r\n plt.savefig(save_dir+\"/trainig2.jpg\")\r\n\r\n return self.model\r\n\r\ntr = TrainOnCustomObject()\r\nmodel = tr.train_model(\"result_plot\")\r\n\r\ntflite_models_dir = pathlib.Path(\"Models/\")\r\nconverter = tf.lite.TFLiteConverter.from_keras_model(model)\r\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\r\nconverter.target_spec.supported_types = [tf.float16]\r\ntflite_fp16_model = converter.convert()\r\ntflite_model_fp16_file = tflite_models_dir/\"vwmodelv1.tflite\"\r\ntflite_model_fp16_file.write_bytes(tflite_fp16_model)\r\n", "sub_path": "Training.py", "file_name": "Training.py", "file_ext": "py", "file_size_in_byte": 6526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.config.list_physical_devices", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.set_memory_growth", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.list_logical_devices", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Config.Config", "line_number": 24, "usage_type": "name"}, {"api_name": "DatabaseGenerator.DataGenerator", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.range", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.MobileNetV2", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.GlobalAveragePooling2D", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Input", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.Model", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.BinaryCrossentropy", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.BinaryCrossentropy", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 118, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 157, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.lite.TFLiteConverter.from_keras_model", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.lite", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tensorflow.lite", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tensorflow.float16", "line_number": 170, "usage_type": "attribute"}]} +{"seq_id": "602895583", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom users.views import AccountViewSet, UserViewSet, UserDetail, UserByName, LoginView, LogoutView\nfrom posts import views\nfrom django.contrib import admin\nfrom intern.views import IndexView\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', AccountViewSet)\nrouter.register(r'archives', views.ArchiveViewSet)\nrouter.register(r'songs', views.SongViewSet)\nrouter.register(r'current', views.CurrentUser)\n\n\nuser_list = UserViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nuser_detail = UserViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'post': 'create'\n})\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include(router.urls)),\n url(r'^api/auth/login/$', LoginView.as_view(), name='login'),\n url(r'^api/auth/logout/$', LogoutView.as_view(), name='logout'),\n url(r'^.*$', IndexView.as_view(), name='index'),\n]\n", "sub_path": "intern/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 10, "usage_type": "name"}, {"api_name": "users.views.AccountViewSet", "line_number": 11, "usage_type": "argument"}, {"api_name": "posts.views.ArchiveViewSet", "line_number": 12, "usage_type": "attribute"}, {"api_name": "posts.views", "line_number": 12, "usage_type": "name"}, {"api_name": "posts.views.SongViewSet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "posts.views", "line_number": 13, "usage_type": "name"}, {"api_name": "posts.views.CurrentUser", "line_number": 14, "usage_type": "attribute"}, {"api_name": "posts.views", "line_number": 14, "usage_type": "name"}, {"api_name": "users.views.UserViewSet.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "users.views.UserViewSet", "line_number": 17, "usage_type": "name"}, {"api_name": "users.views.UserViewSet.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "users.views.UserViewSet", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", 
"line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "users.views.LoginView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "users.views.LoginView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "users.views.LogoutView.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "users.views.LogoutView", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "intern.views.IndexView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "intern.views.IndexView", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "152979576", "text": "import json\nimport select\nimport socket\nimport sys\nimport thread\n\nimport config_file_helper\n\n\ndef load_settings():\n SETTINGS_FILE = \"server_settings.json\"\n settings_defaults = {\n 'host': \"127.0.0.1\",\n 'port': 8000,\n 'max_clients': 10\n }\n\n # load settings from config file SETTINGS_FILE\n settings_data = config_file_helper.load_config_file(\n SETTINGS_FILE, settings_defaults)\n\n print(\"Settings loaded from \" + SETTINGS_FILE)\n\n # assign global variables to loaded settings\n global HOST\n global PORT\n global MAX_CLIENTS\n\n HOST = settings_data['host']\n PORT = settings_data['port']\n MAX_CLIENTS = settings_data['max_clients']\n\n\ndef input_thread():\n global server_created\n global server_running\n\n while not server_created:\n while server_running:\n user_input = raw_input()\n if user_input == \"q\" or user_input == \"exit\":\n server_running = False\n\n\ndef recv_packet(sock):\n BUFFER_SIZE = 1024\n packet = sock.recv(BUFFER_SIZE)\n decoded_packet = json.loads(packet)\n return decoded_packet['message']\n\n\ndef server():\n global server_created\n global server_running\n\n server_running = True\n try:\n server_socket = socket.socket()\n server_socket.bind((HOST, PORT))\n server_socket.listen(True)\n print(\"Server running at \" + HOST + \":\" + str(PORT))\n print(\"Type 'q' or 'exit' to quit.\")\n except socket.error:\n sys.exit(\"Unable to create server.\")\n\n server_created = True\n server_running = True\n\n socket_list = [server_socket]\n\n while(server_running):\n readable_sockets, writeable_sockets, errored_sockets = (\n select.select(socket_list, [], [], 1))\n\n for sock in readable_sockets:\n if sock is server_socket:\n client_socket, address = server_socket.accept()\n readable_sockets.append(client_socket)\n print (\"A new client has connected from \" + address[0])\n else:\n packet = recv_packet(sock)\n if packet:\n print(packet)\n else:\n sock.close()\n readable_sockets.remove(sock)\n print (\"Client disconnected.\")\n\n print (\"Server stopped.\")\n\n\ndef main():\n global server_created\n\n load_settings()\n server_created = False\n\n try:\n thread.start_new_thread(input_thread, ())\n except thread.error:\n sys.exit(\"Unable to create threads.\")\n\n server()\n\n\nmain()\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": 
"config_file_helper.load_config_file", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 58, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "select.select", "line_number": 73, "usage_type": "call"}, {"api_name": "thread.start_new_thread", "line_number": 99, "usage_type": "call"}, {"api_name": "thread.error", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "361818815", "text": "\"\"\"'employee'\n\nRevision ID: 900f84ca2e41\nRevises: f18beda458df\nCreate Date: 2020-03-13 10:44:06.123405\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '900f84ca2e41'\ndown_revision = 'f18beda458df'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('employee', sa.Column('address', sa.String(length=100), nullable=True))\n op.add_column('employee', sa.Column('age', sa.Integer(), nullable=True))\n op.add_column('employee', sa.Column('depart', sa.String(length=100), nullable=True))\n op.add_column('employee', sa.Column('email', sa.String(length=100), nullable=True))\n op.add_column('employee', sa.Column('id_card', sa.Integer(), nullable=True))\n op.add_column('employee', sa.Column('phone', sa.String(length=11), nullable=True))\n op.add_column('employee', sa.Column('post', sa.String(length=100), nullable=True))\n op.add_column('employee', sa.Column('sex', sa.Boolean(), nullable=True))\n op.add_column('employee', sa.Column('work_num', sa.String(length=6), nullable=True))\n op.create_unique_constraint(None, 'employee', ['email'])\n op.create_unique_constraint(None, 'employee', ['phone'])\n op.create_unique_constraint(None, 'employee', ['id_card'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'employee', type_='unique')\n op.drop_constraint(None, 'employee', type_='unique')\n op.drop_constraint(None, 'employee', type_='unique')\n op.drop_column('employee', 'work_num')\n op.drop_column('employee', 'sex')\n op.drop_column('employee', 'post')\n op.drop_column('employee', 'phone')\n op.drop_column('employee', 'id_card')\n op.drop_column('employee', 'email')\n op.drop_column('employee', 'depart')\n op.drop_column('employee', 'age')\n op.drop_column('employee', 'address')\n # ### end Alembic commands ###\n", "sub_path": "代码/Management/migrations/versions/900f84ca2e41_employee.py", "file_name": "900f84ca2e41_employee.py", "file_ext": "py", "file_size_in_byte": 2016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 29, "usage_type": "call"}, {"api_name": 
"alembic.op.create_unique_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 42, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 43, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 43, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 44, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 44, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 45, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 45, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 47, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 48, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 49, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "475474943", "text": "#!/usr/bin/env python3\n# encoding: utf-8\n# @Time : 2018/8/2 下午3:23\n# @Author : yuchangqian\n# @Contact : changqian_yu@163.com\n# @File : engine.py\nimport os\nimport os.path as osp\nimport time\n\nimport cv2\nimport numpy as np\nimport random\n\nimport shutil\n\nimport torch\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\n\nfrom .logger import get_logger\nfrom .version import __version__\nfrom modules.utils.pyt_utils import load_model, link_file, ensure_dir\n\nfrom modules.utils.tb_logger import Logger\n\n\nclass State(object):\n def __init__(self):\n self.epoch = 0\n self.iteration = 0\n self.dataloader = None\n self.model = None\n self.optimizer = None\n\n def register(self, **kwargs):\n for k, v in kwargs.items():\n assert k in ['epoch', 'iteration', 'dataloader', 'model',\n 'optimizer']\n setattr(self, k, v)\n\n\nclass Engine(object):\n def __init__(self, config=None):\n \"\"\"\n :param config: easydict\n \"\"\"\n self.version = __version__\n self.logger = get_logger()\n self.logger.info(\n \"PyTorch Version {}, MyTorch Version {}\".format(torch.__version__,\n self.version))\n self.state = State()\n self.distributed = False\n self.local_rank = 0\n\n self.config = config\n 
self.continue_state_object = self.config.model.continue_path\n\n if 'WORLD_SIZE' in os.environ:\n self.distributed = int(os.environ['WORLD_SIZE']) > 1 or torch.cuda.device_count() > 1\n self.amp = False if self.config.get('amp') is None else True\n\n if self.config.environ.deterministic:\n cudnn.benchmark = False\n cudnn.deterministic = True\n torch.set_printoptions(precision=10)\n else:\n cudnn.benchmark = True\n\n if self.amp:\n assert torch.backends.cudnn.enabled, \"Amp requires cudnn backend to be enabled.\"\n\n # set random seed\n torch.manual_seed(config.environ.seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(config.environ.seed)\n np.random.seed(config.environ.seed)\n random.seed(config.environ.seed)\n\n if self.distributed:\n self.world_size = int(os.environ['WORLD_SIZE'])\n dist.init_process_group(backend=\"nccl\", init_method='env://')\n self.local_rank = dist.get_rank()\n torch.cuda.set_device(self.local_rank)\n self.logger.info('world size: {}, local rank: {}.'.format(self.world_size, self.local_rank))\n else:\n raise NotImplementedError\n\n # tensorboard logger\n if self.local_rank == 0:\n self.tb_logger = Logger(self.config.log.snapshot_dir)\n\n def register_state(self, **kwargs):\n self.state.register(**kwargs)\n\n def update_iteration(self, epoch, iteration):\n self.state.epoch = epoch\n self.state.iteration = iteration\n\n def save_checkpoint(self, path):\n self.logger.info(\"Saving checkpoint to file {}\".format(path))\n t_start = time.time()\n\n state_dict = {}\n\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in self.state.model.state_dict().items():\n key = k\n if k.split('.')[0] == 'module':\n key = k[7:]\n new_state_dict[key] = v\n\n state_dict['model'] = new_state_dict\n state_dict['optimizer'] = self.state.optimizer.state_dict()\n state_dict['epoch'] = self.state.epoch\n state_dict['iteration'] = self.state.iteration\n\n t_iobegin = time.time()\n torch.save(state_dict, path)\n del state_dict\n del new_state_dict\n t_end = time.time()\n self.logger.info(\n \"Save checkpoint to file {}, \"\n \"Time usage:\\n\\tprepare snapshot: {}, IO: {}\".format(\n path, t_iobegin - t_start, t_end - t_iobegin))\n\n def save_and_link_checkpoint(self, snapshot_dir):\n ensure_dir(snapshot_dir)\n # if not osp.exists(log_dir_link):\n # link_file(log_dir, log_dir_link)\n current_epoch_checkpoint = osp.join(snapshot_dir, 'epoch-{}.pth'.format(\n self.state.epoch))\n self.save_checkpoint(current_epoch_checkpoint)\n last_epoch_checkpoint = osp.join(snapshot_dir,\n 'epoch-last.pth')\n link_file(current_epoch_checkpoint, last_epoch_checkpoint)\n\n def save_images(self, snapshot_dir, filenames, image):\n img_saved_dir = osp.join(snapshot_dir, 'imgs')\n ensure_dir(img_saved_dir)\n filenames = osp.join(img_saved_dir, filenames)\n if image is not None:\n cv2.imwrite(filenames, image)\n\n def copy_config(self, snapshot_dir, config_file):\n ensure_dir(snapshot_dir)\n assert osp.exists(config_file), \"config file is not existed.\"\n new_file_name = osp.join(snapshot_dir, 'config.json')\n shutil.copy(config_file, new_file_name)\n\n def restore_checkpoint(self):\n t_start = time.time()\n if self.distributed:\n # tmp = torch.load(self.continue_state_object,\n # map_location=lambda storage, loc: storage.cuda(\n # self.local_rank))\n tmp = torch.load(self.continue_state_object, map_location=torch.device('cpu'))\n else:\n tmp = torch.load(self.continue_state_object)\n t_ioend = time.time()\n\n self.state.model = load_model(self.state.model, tmp['model'],\n 
True)\n self.state.optimizer.load_state_dict(tmp['optimizer'])\n self.state.epoch = tmp['epoch'] + 1\n self.state.iteration = tmp['iteration']\n del tmp\n t_end = time.time()\n self.logger.info(\n \"Load checkpoint from file {}, \"\n \"Time usage:\\n\\tIO: {}, restore snapshot: {}\".format(\n self.continue_state_object, t_ioend - t_start, t_end - t_ioend))\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n torch.cuda.empty_cache()\n if type is not None:\n self.logger.warning(\n \"A exception occurred during Engine initialization, \"\n \"give up pspnet_ade process\")\n return False\n\n if self.local_rank == 0:\n self.tb_logger.close()\n", "sub_path": "modules/engine/engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 6488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "version.__version__", "line_number": 48, "usage_type": "name"}, {"api_name": "logger.get_logger", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.__version__", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.backends.cudnn.deterministic", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.set_printoptions", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 78, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 79, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.distributed.get_rank", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.cuda.set_device", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 85, "usage_type": "attribute"}, {"api_name": "modules.utils.tb_logger.Logger", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 120, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 121, "usage_type": "call"}, {"api_name": "time.time", "line_number": 124, "usage_type": "call"}, {"api_name": "modules.utils.pyt_utils.ensure_dir", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "name"}, {"api_name": "modules.utils.pyt_utils.link_file", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "name"}, {"api_name": "modules.utils.pyt_utils.ensure_dir", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 146, "usage_type": "call"}, {"api_name": "modules.utils.pyt_utils.ensure_dir", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 152, "usage_type": "call"}, {"api_name": "time.time", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 162, "usage_type": "call"}, {"api_name": "time.time", "line_number": 163, "usage_type": "call"}, {"api_name": "modules.utils.pyt_utils.load_model", "line_number": 165, "usage_type": "call"}, {"api_name": "time.time", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 181, "usage_type": "attribute"}]} +{"seq_id": "202708164", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 18 17:08:18 2020\n\n@author: BreezeCat\n\"\"\"\nimport sys\nimport tensorflow as tf\nimport json\nimport random\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport datetime\nimport copy\nimport file_manger\nimport state_load as SL\nimport os\nimport Agent\nimport Network\nimport configparser\nimport Combination\n\ntf.reset_default_graph()\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.20) \ncolor_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n\n####\n#Common parameter\n####\nPI = math.pi\nresX = 0.1 # resolution of X\nresY = 0.1 # resolution of Y\nresTH = PI/15\nLOG_DIR = 'logs/Multi_test'\nSOME_TAG = '_test4'\n\n####\n#Reward\n####\nArrived_reward = 1\nTime_out_penalty = -0.25\nCollision_high_penalty = -0.5\nCollision_low_penalty = -1\nCollision_equ_penalty = -0.75\n\n\n'''\nDL Parameter\n'''\ntraining_eposide_num = 5000 #100000 \ntraining_num = 1500 #3000\ntest_num = 1\ntwo_robot_Network_Path = '2_robot_network/gamma09_95_0429/test.ckpt'\n\n'''\nMotion Parameter\n'''\ndeltaT = 0.1 #unit:s\nV_max = 3 #m/s\nW_max = 2 #rad/s\nlinear_acc_max = 10 #m/s^2\nangular_acc_max = 7 #rad/s^2\nsize_min = 0.1 #unit:m\nx_upper_bound = 5 #unit:m\nx_lower_bound = -5 #unit:m\ny_upper_bound = 5 #unit:m\ny_lower_bound = -5 
#unit:m\nTIME_OUT_FACTOR = 4\n\n\nRL_eposide_num = 100\nRL_epsilon = 0\ngamma = 0.9\n\nNetwork_Path_Dict = {'2':'2_robot_network/gamma09_95_0429/test.ckpt', \n '3':'multi_robot_network/3_robot_network/0826_933/3_robot.ckpt',\n '4':'multi_robot_network/4_robot_network/1020_92/4_robot.ckpt'\n }\n\n\n\n\ndef Load_Config(file):\n print('Load config from ' + file)\n config = configparser.ConfigParser()\n config.read(file)\n configDict = {section: dict(config.items(section)) for section in config.sections()}\n print(configDict)\n return configDict\n\ndef Set_parameter(paraDict):\n global deltaT, V_max, W_max, linear_acc_max, angular_acc_max, size_min, TIME_OUT_FACTOR\n print('Set parameter\\n', paraDict)\n deltaT = float(paraDict['deltat'])\n V_max, W_max, linear_acc_max, angular_acc_max = float(paraDict['v_max']), float(paraDict['w_max']), float(paraDict['linear_acc_max']), float(paraDict['angular_acc_max'])\n size_min = float(paraDict['size_min'])\n TIME_OUT_FACTOR = float(paraDict['time_out_factor'])\n \ndef Build_network(session, robot_num, base_network):\n Network_set = []\n item_list = [str(i+2) for i in range(robot_num-1)]\n Comb_list = Combination.Combination_list(item_list, base_network - 1)\n for item in Comb_list:\n name = '1'\n for i in item: name += i\n Network_set.append(Network.Network_Dict[str(base_network)](name))\n if len(Network_set) == 1:\n Target_network = Network_set[0]\n Train_Dict = {}\n Train_Dict['ref_value'] = tf.placeholder(tf.float32, [None, 1])\n Train_Dict['cost'] = tf.losses.mean_squared_error(Target_network.value, Train_Dict['ref_value'])\n Train_Dict['loss'] = Train_Dict['cost']\n Train_Dict['loss_record'] = tf.summary.scalar('loss',Train_Dict['loss'])\n Train_Dict['train_step'] = tf.train.AdamOptimizer(1e-3).minimize(Train_Dict['loss'])\n init = tf.global_variables_initializer()\n session.run(init)\n with tf.name_scope('Pred_value'):\n pred_value = Target_network.value\n \n else:\n Train_Dict = None\n with tf.name_scope('Pred_value'):\n smaller_value_list = [Network_set[0].value]\n for i in range(len(Network_set)-1):\n smaller_value_list.append(tf.minimum(smaller_value_list[i], Network_set[i+1].value))\n pred_value = smaller_value_list[-1]\n \n for item in Network_set:\n item.restore_parameter(session, Network_Path_Dict[str(base_network)])\n \n return pred_value, Network_set, Train_Dict\n\n\n\ndef Calculate_distance(x1, y1, x2, y2):\n return np.sqrt(math.pow( (x1-x2) , 2) + math.pow( (y1-y2) , 2))\n\ndef Check_Collision(agent1, agent2):\n distance = Calculate_distance(agent1.state.Px, agent1.state.Py, agent2.state.Px, agent2.state.Py)\n if (distance <= (agent1.state.r + agent2.state.r)):\n return True\n else:\n return False\n\n\ndef Check_Goal(agent, position_tolerance, orientation_tolerance): \n position_error = Calculate_distance(agent.state.Px, agent.state.Py, agent.gx, agent.gy)\n orientation_error = abs(agent.state.Pth - agent.gth)\n if (position_error < position_tolerance) and (orientation_error < orientation_tolerance):\n return True\n else:\n return False\n\ndef Random_Agent(name):\n Px = random.random()*(x_upper_bound - x_lower_bound) + x_lower_bound\n Py = random.random()*(y_upper_bound - y_lower_bound) + y_lower_bound\n Pth = random.random()*2*PI \n V = 0 #(random.random() - 0.5) * V_max\n W = 0 #(random.random() - 0.5) * W_max\n r = random.random() + size_min\n gx = random.random()*(x_upper_bound - x_lower_bound) + x_lower_bound\n gy = random.random()*(y_upper_bound - y_lower_bound) + y_lower_bound\n gth = random.random()*2*PI \n rank = 
random.randint(1,3)\n return Agent.Agent(name, Px, Py, Pth, V, W, r, gx, gy, gth, rank, mode = 'Greedy')\n\ndef Set_Agent(name):\n Px = float(input('Px(-5~5m): '))\n Py = float(input('Py(-5~5m): '))\n Pth = float(input('Pth(0~6.28): '))\n V = 0 #(random.random() - 0.5) * V_max\n W = 0 #(random.random() - 0.5) * W_max\n r = float(input('r(0.1~1m): '))\n gx = float(input('gx(-5~5m): '))\n gy = float(input('gy(-5~5m): '))\n gth = float(input('gth(0~6.28): '))\n rank = int(input('rnak(1.2.3): '))\n return Agent.Agent(name, Px, Py, Pth, V, W, r, gx, gy, gth, rank, mode = 'Greedy')\n\ndef Predict_action_value(main_agent, Agent_Set, V_pred, W_pred, base_network):\n Other_Set, State_list = [], []\n for agent in Agent_Set:\n if main_agent.name != agent.name:\n Other_Set.append(agent)\n Comb_Set = Combination.Combination_list(Other_Set, base_network-1)\n \n pred_state = main_agent.Predit_state(V_pred, W_pred, dt = deltaT)\n obs_gx, obs_gy, obs_gth = main_agent.Relative_observed_goal(pred_state.Px, pred_state.Py, pred_state.Pth)\n \n for Comb_item in Comb_Set:\n other_state = [V_pred, W_pred, main_agent.state.r, obs_gx, obs_gy, obs_gth, V_max] \n for agent in Comb_item:\n obs_state = agent.Relative_observed_state(pred_state.Px, pred_state.Py, pred_state.Pth) \n m11, m12, m13 = 0, 0, 0\n if main_agent.rank > agent.rank: \n m11 = 1\n elif main_agent.rank < agent.rank: \n m13 = 1\n else: \n m12 = 1\n other_state += [m11, m12, m13, obs_state.x, obs_state.y, obs_state.Vx, obs_state.Vy, obs_state.r]\n State_list.append([other_state])\n \n if len(State_list) == len(Network_list):\n state_dict = {}\n for i in range(len(State_list)):\n state_dict[Network_list[i].state] = State_list[i]\n else:\n print('robot num error')\n return 0\n value_matrix = sess.run(Value, feed_dict = state_dict)\n \n R = 0\n \n main_agent_pred = Agent.Agent('Pred', pred_state.Px, pred_state.Py, pred_state.Pth, pred_state.V, pred_state.W, pred_state.r, main_agent.gx, main_agent.gy, main_agent.gth, main_agent.rank)\n if Check_Goal(main_agent_pred, Calculate_distance(resX, resY, 0, 0), resTH):\n R = Arrived_reward\n for item in Agent_Set:\n if main_agent.name != item.name:\n if Check_Collision(main_agent, item):\n if main_agent.rank > item.rank:\n R = Collision_high_penalty\n elif main_agent.rank < item.rank: \n R = Collision_low_penalty\n else: \n R = Collision_equ_penalty\n break\n action_value = R + value_matrix[0][0]\n \n return action_value\n\n\ndef Choose_action_from_Network(main_agent, Agent_Set, epsilon, base_network):\n dice = random.random()\n action_value_max = -999999 \n if dice < epsilon:\n linear_acc = -linear_acc_max + random.random() * 2 * linear_acc_max\n angular_acc = -angular_acc_max + random.random() * 2 * angular_acc_max\n V_pred = np.clip(main_agent.state.V + linear_acc * deltaT, -V_max, V_max)\n W_pred = np.clip(main_agent.state.W + angular_acc * deltaT, -W_max, W_max)\n else:\n linear_acc_set = np.arange(-linear_acc_max, linear_acc_max, 1)\n angular_acc_set = np.arange(-angular_acc_max, angular_acc_max, 1)\n for linear_acc in linear_acc_set:\n V_pred = np.clip(main_agent.state.V + linear_acc * deltaT, -V_max, V_max)\n for angular_acc in angular_acc_set:\n W_pred = np.clip(main_agent.state.W + angular_acc * deltaT, -W_max, W_max)\n action_value = Predict_action_value(main_agent, Agent_Set, V_pred, W_pred, base_network)\n if action_value > action_value_max:\n action_value_max = action_value\n action_pair = [V_pred, W_pred] \n V_pred = action_pair[0]\n W_pred = action_pair[1]\n #print(action_value_max)\n return 
V_pred, W_pred\n\n\ndef Choose_action(main_agent, Agent_Set, base_network):\n if main_agent.mode == 'Static':\n V_next, W_next = 0, 0\n if main_agent.mode == 'Random':\n V_next = main_agent.state.V + random.random() - 0.5\n W_next = main_agent.state.W + random.random() - 0.5\n if main_agent.mode == 'Greedy':\n V_next, W_next = Choose_action_from_Network(main_agent, Agent_Set, 0, base_network)\n \n return V_next, W_next\n\ndef Show_Path(Agent_Set, result, save_path):\n plt.close('all')\n plt.figure(figsize=(12,12))\n ax = plt.gca()\n ax.cla() \n ax.set_xlim((x_lower_bound,x_upper_bound)) #上下限\n ax.set_ylim((x_lower_bound,x_upper_bound))\n plt.xlabel('X(m)')\n plt.ylabel('Y(m)')\n color_count = 0\n for agent in Agent_Set:\n agent.Plot_Path(ax = ax, color = color_list[color_count%len(color_list)])\n agent.Plot_goal(ax = ax, color = color_list[color_count%len(color_list)])\n color_count += 1\n NOW = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n plt.savefig(save_path +'/'+ NOW + result +'.png')\n return\n\ndef RL_process(robot_num, eposide_num, epsilon, RL_SAVE_PATH, base_network): \n for eposide in range(eposide_num):\n \n if eposide%20 == 0:\n print(eposide)\n Main_Agent = Random_Agent('Main')\n Agent_Set = [Main_Agent]\n for i in range(robot_num-1):\n Agent_Set.append(Random_Agent(str(i+2))) \n \n time = 0\n result = 'Finish'\n \n Collision_Flag = False\n Goal_dist_Flag = False\n for item in Agent_Set:\n for item2 in Agent_Set:\n if item.name != item2.name:\n Collision_Flag = Collision_Flag or Check_Collision(item, item2)\n Goal_dist_Flag = Goal_dist_Flag or Calculate_distance(item.gx, item.gy, item2.gx, item2.gy) < (item.state.r + item2.state.r)\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n continue\n\n if Check_Goal(Main_Agent, Calculate_distance(resX, resY, 0, 0), resTH):\n continue\n \n TIME_OUT = Calculate_distance(Main_Agent.state.Px, Main_Agent.state.Py, Main_Agent.gx, Main_Agent.gy) * TIME_OUT_FACTOR\n \n \n NOW = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n save_path = RL_SAVE_PATH + '/' + NOW\n os.makedirs(save_path)\n while(not Check_Goal(Main_Agent, Calculate_distance(resX, resY, 0, 0), resTH)):\n if time > TIME_OUT:\n result = 'TIME_OUT'\n break\n \n for item in Agent_Set:\n if Main_Agent.name != item.name:\n if Check_Collision(Main_Agent, item):\n if Main_Agent.rank > item.rank:\n result = 'Collision_high'\n elif Main_Agent.rank < item.rank: \n result = 'Collision_low'\n else: \n result = 'Collision_equal'\n break\n if result != 'Finish':\n break\n else:\n for agent in Agent_Set:\n if Check_Goal(agent, Calculate_distance(resX, resY, 0, 0), resTH):\n V_next, W_next = 0, 0 \n else:\n V_next, W_next = Choose_action(agent, Agent_Set, base_network)\n agent.Set_V_W(V_next, W_next)\n \n for agent in Agent_Set:\n agent.Update_state(dt = deltaT)\n \n time = time + deltaT\n \n \n for agent in Agent_Set:\n agent.Record_data(save_path)\n Show_Path(Agent_Set, result, RL_SAVE_PATH)\n \n return\n\n\ndef RL_process_all_Goal(robot_num, eposide_num, epsilon, RL_SAVE_PATH, base_network): \n for eposide in range(eposide_num):\n if eposide%20 == 0:\n print(eposide)\n Main_Agent = Random_Agent('Main')\n Agent_Set = [Main_Agent]\n for i in range(robot_num-1):\n Agent_Set.append(Random_Agent(str(i+2))) \n \n time = 0\n \n Collision_Flag = False\n Goal_dist_Flag = False\n for item in Agent_Set:\n for item2 in Agent_Set:\n if item.name != item2.name:\n Collision_Flag = Collision_Flag or 
Check_Collision(item, item2)\n Goal_dist_Flag = Goal_dist_Flag or Calculate_distance(item.gx, item.gy, item2.gx, item2.gy) < (item.state.r + item2.state.r)\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n continue\n\n if Check_Goal(Main_Agent, Calculate_distance(resX, resY, 0, 0), resTH):\n continue\n \n TIME_OUT = 0\n for agent in Agent_Set:\n TIME_OUT = max(TIME_OUT, Calculate_distance(agent.state.Px, agent.state.Py, agent.gx, agent.gy) * TIME_OUT_FACTOR)\n \n \n terminal_flag = True\n for agent in Agent_Set:\n small_goal_flag = Check_Goal(agent, Calculate_distance(resX, resY, 0, 0), resTH)\n if small_goal_flag:\n agent.Goal_state = 'Finish'\n terminal_flag = terminal_flag and small_goal_flag\n \n NOW = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n save_path = RL_SAVE_PATH + '/' + NOW\n os.makedirs(save_path)\n while(not terminal_flag): \n for agent1 in Agent_Set: \n for agent2 in Agent_Set:\n if agent1.name != agent2.name:\n if Check_Collision(agent1, agent2):\n if agent1.rank > agent2.rank:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_high'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_low'\n elif agent1.rank < agent2.rank:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_low'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_high'\n else:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_equal'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_equal'\n if Check_Goal(agent1, Calculate_distance(resX, resY, 0, 0), resTH) and agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Finish'\n\n\n terminal_flag = True\n for agent in Agent_Set:\n if agent.Goal_state == 'Not':\n V_next, W_next = Choose_action(agent, Agent_Set, base_network)\n else:\n V_next, W_next = 0, 0 \n agent.Set_V_W(V_next, W_next)\n terminal_flag = terminal_flag and agent.Goal_state != 'Not'\n \n if time > TIME_OUT:\n for agent in Agent_Set:\n if agent.Goal_state == 'Not':\n agent.Goal_state = 'TIME_OUT'\n break\n \n for agent in Agent_Set:\n agent.Update_state(dt = deltaT) \n time = time + deltaT\n \n result = ''\n for agent in Agent_Set:\n result = result + agent.Goal_state[0]\n agent.Record_data(save_path)\n Show_Path(Agent_Set, result, save_path)\n return\n\n\ndef TEST_process_all_Goal(robot_num, eposide_num, epsilon, RL_SAVE_PATH, base_network): \n for eposide in range(eposide_num):\n if eposide%20 == 0:\n print(eposide)\n Main_Agent = Set_Agent('Main')\n Agent_Set = [Main_Agent]\n for i in range(robot_num-1):\n Agent_Set.append(Set_Agent(str(i+2))) \n \n time = 0\n \n Collision_Flag = False\n Goal_dist_Flag = False\n for item in Agent_Set:\n for item2 in Agent_Set:\n if item.name != item2.name:\n Collision_Flag = Collision_Flag or Check_Collision(item, item2)\n Goal_dist_Flag = Goal_dist_Flag or Calculate_distance(item.gx, item.gy, item2.gx, item2.gy) < (item.state.r + item2.state.r)\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n break\n if Collision_Flag or Goal_dist_Flag:\n continue\n\n if Check_Goal(Main_Agent, Calculate_distance(resX, resY, 0, 0), resTH):\n continue\n \n TIME_OUT = 0\n for agent in Agent_Set:\n TIME_OUT = max(TIME_OUT, Calculate_distance(agent.state.Px, agent.state.Py, agent.gx, agent.gy) * TIME_OUT_FACTOR)\n \n \n terminal_flag = True\n for agent in Agent_Set:\n small_goal_flag = Check_Goal(agent, Calculate_distance(resX, resY, 0, 0), resTH)\n if 
small_goal_flag:\n agent.Goal_state = 'Finish'\n terminal_flag = terminal_flag and small_goal_flag\n \n NOW = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n save_path = RL_SAVE_PATH + '/' + NOW\n os.makedirs(save_path)\n while(not terminal_flag): \n for agent1 in Agent_Set: \n for agent2 in Agent_Set:\n if agent1.name != agent2.name:\n if Check_Collision(agent1, agent2):\n if agent1.rank > agent2.rank:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_high'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_low'\n elif agent1.rank < agent2.rank:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_low'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_high'\n else:\n if agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Collision_equal'\n if agent2.Goal_state == 'Not':\n agent2.Goal_state = 'Collision_equal'\n if Check_Goal(agent1, Calculate_distance(resX, resY, 0, 0), resTH) and agent1.Goal_state == 'Not':\n agent1.Goal_state = 'Finish'\n\n\n terminal_flag = True\n for agent in Agent_Set:\n if agent.Goal_state == 'Not':\n V_next, W_next = Choose_action(agent, Agent_Set, base_network)\n else:\n V_next, W_next = 0, 0 \n agent.Set_V_W(V_next, W_next)\n terminal_flag = terminal_flag and agent.Goal_state != 'Not'\n \n if time > TIME_OUT:\n for agent in Agent_Set:\n if agent.Goal_state == 'Not':\n agent.Goal_state = 'TIME_OUT'\n break\n \n for agent in Agent_Set:\n agent.Update_state(dt = deltaT) \n time = time + deltaT\n \n result = ''\n for agent in Agent_Set:\n result = result + agent.Goal_state[0]\n agent.Record_data(save_path)\n Show_Path(Agent_Set, result, save_path)\n return\n\n\nif __name__ == '__main__':\n NOW = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n if len(sys.argv) < 2:\n Configfile = input('Config file at:')\n else:\n Configfile = sys.argv[1]\n Config_dict = Load_Config(Configfile)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n \n if 'base' in Config_dict['main']:\n Base = int(Config_dict['main']['base'])\n else:\n Base = 2\n print('Test with ' + str(Base) + ' Network')\n \n Value, Network_list, Train = Build_network(sess, int(Config_dict['main']['robot_num']), Base)\n \n if int(Config_dict['main']['custom_parameter']):\n Set_parameter(Config_dict['parameter'])\n \n if int(Config_dict['main']['all_goal']):\n print('All goal process')\n save_path = Config_dict['main']['save_path'] + '/' + NOW +'_all_goal'\n os.makedirs(save_path)\n RL_process_all_Goal(int(Config_dict['main']['robot_num']), int(Config_dict['main']['eposide_num']), epsilon = 1, RL_SAVE_PATH = save_path, base_network = Base)\n else:\n save_path = Config_dict['main']['save_path'] + '/' + NOW +'_main_goal'\n os.makedirs(save_path)\n RL_process(int(Config_dict['main']['robot_num']), int(Config_dict['main']['eposide_num']), epsilon = 1, RL_SAVE_PATH = save_path, base_network = Base)\n", "sub_path": "multi_robot_test.py", "file_name": "multi_robot_test.py", "file_ext": "py", "file_size_in_byte": 22828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.reset_default_graph", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 25, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 86, "usage_type": "call"}, {"api_name": "Combination.Combination_list", "line_number": 103, "usage_type": "call"}, 
{"api_name": "Network.Network_Dict", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.losses.mean_squared_error", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.minimum", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 137, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 137, "usage_type": "call"}, {"api_name": "random.random", "line_number": 156, "usage_type": "call"}, {"api_name": "random.random", "line_number": 157, "usage_type": "call"}, {"api_name": "random.random", "line_number": 158, "usage_type": "call"}, {"api_name": "random.random", "line_number": 161, "usage_type": "call"}, {"api_name": "random.random", "line_number": 162, "usage_type": "call"}, {"api_name": "random.random", "line_number": 163, "usage_type": "call"}, {"api_name": "random.random", "line_number": 164, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 165, "usage_type": "call"}, {"api_name": "Agent.Agent", "line_number": 166, "usage_type": "call"}, {"api_name": "Agent.Agent", "line_number": 179, "usage_type": "call"}, {"api_name": "Combination.Combination_list", "line_number": 186, "usage_type": "call"}, {"api_name": "Agent.Agent", "line_number": 216, "usage_type": "call"}, {"api_name": "random.random", "line_number": 235, "usage_type": "call"}, {"api_name": "random.random", "line_number": 238, "usage_type": "call"}, {"api_name": "random.random", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 248, "usage_type": "call"}, {"api_name": "random.random", "line_number": 263, "usage_type": "call"}, {"api_name": "random.random", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 284, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 321, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 323, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 402, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 402, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 404, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 496, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 496, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 498, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 551, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 551, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 552, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 555, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 557, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 557, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 573, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 577, "usage_type": "call"}]} +{"seq_id": "483629043", "text": "import pyglet\nfrom pyglet.window import key\n\nfrom sprite import Sprite\n\nclass Tesla(Sprite):\n \"\"\" This class defines tesla sprites \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Tesla, self).__init__(*args, **kwargs)\n\n self.is_on_ground = False\n self.velocity_x = 0\n self.velocity_y = 0\n\n def copy_attributes(self, other):\n \"\"\" Copies attributes from another copy of tesla \"\"\"\n\n self.x = other.x\n self.y = other.y\n self.is_on_ground = other.is_on_ground\n self.velocity_x = other.velocity_x\n self.velocity_y = other.velocity_y\n\n def touches_ground(self, environment_sprite):\n \"\"\" Checks if the player has landed on solid ground \"\"\"\n\n # Use dt and velocity_y to calculate if the player has intersected the environment_sprite during this last iteration\n\n second_y_2bound = environment_sprite.y + environment_sprite.height\n second_x_1bound, second_x_2bound = environment_sprite.x, environment_sprite.x + environment_sprite.width\n\n if ((second_x_1bound < self.x and second_x_2bound > self.x) or (second_x_1bound < self.x + self.width and second_x_2bound > self.x + self.width)) and self.y <= second_y_2bound and self.y >= second_y_2bound - environment_sprite.height:\n return True\n\n return False\n\n def update(self, dt):\n \"\"\" Update sprite shenaniganz\n dt: time delta, the change in time\n \"\"\"\n self.x += dt * self.velocity_x\n self.y += dt * self.velocity_y\n\n def on_key_press(self, symbol, modifiers):\n \"\"\" Key press event handler\n symbol: the symbol(key) pressed\n modifiers: the extra keys pressed (ex. 
Ctrl or Alt)\n\n Called by the on_key_press function in the GameWindow class\n \"\"\"\n if symbol == key.A:\n self.velocity_x = -100.0\n if symbol == key.D:\n self.velocity_x = 100.0\n if (symbol == key.SPACE or symbol == key.W) and self.velocity_y == 0:\n self.velocity_y = 500.0\n\n def on_key_release(self, symbol, modifiers):\n if symbol == key.A and self.velocity_x < 0:\n self.velocity_x = 0.0\n if symbol == key.D and self.velocity_x > 0:\n self.velocity_x = 0.0\n\n", "sub_path": "Core/tesla.py", "file_name": "tesla.py", "file_ext": "py", "file_size_in_byte": 2281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sprite.Sprite", "line_number": 6, "usage_type": "name"}, {"api_name": "pyglet.window.key.A", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 52, "usage_type": "name"}, {"api_name": "pyglet.window.key.D", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 54, "usage_type": "name"}, {"api_name": "pyglet.window.key.SPACE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 56, "usage_type": "name"}, {"api_name": "pyglet.window.key.W", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pyglet.window.key.A", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 60, "usage_type": "name"}, {"api_name": "pyglet.window.key.D", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "176807373", "text": "from django.shortcuts import render, redirect\nfrom repository.models import *\nfrom django.http import JsonResponse\nfrom backend.auth import check_login\nfrom utils.Pagination import Pager2\nfrom backend.forms.article import *\nfrom django.db import transaction\n\n\n@check_login\ndef index(request):\n return render(request, 'backend/backend_index.html')\n\n\n@check_login\ndef article(request, *args, **kwargs):\n condition = {}\n url = {}\n bid = request.session.get('user')['pb__id']\n for k, v in kwargs.items():\n temp = int(v)\n kwargs[k] = temp\n url[k] = v\n if int(v) == 0:\n pass\n else:\n condition[k] = v\n condition['blog_id'] = bid\n base_url = '/backend/article' + '-' + url['article_type_id'] + '-' + url['classify_id']\n article_list = BlogArticle.objects.filter(**condition).all().order_by('-add_date')\n count = BlogArticle.objects.filter(**condition).count()\n types = BlogArticle.type_choices\n classify_list = ArticleClassify.objects.filter(blog_id=bid)\n pagination = Pager2.Pagination(count, request.GET.get('p'), 5, 5)\n article_list = article_list[pagination.start():pagination.end()]\n page_str = pagination.page_str(base_url)\n context = {\n 'count': count,\n 'types': types,\n 'classify_list': classify_list,\n 'article_list': article_list,\n 'condition': condition,\n 'kwargs': kwargs,\n 'page_str': page_str,\n }\n return render(request, 'backend/article/backend_article.html', context=context)\n\n\n@check_login\ndef add_article(request):\n if request.method == 'GET':\n obj = ArticleForm(request=request)\n return render(request, 'backend/article/backend_add_article.html', {'obj': obj})\n elif request.method == 'POST':\n obj = ArticleForm(request=request, data=request.POST)\n if obj.is_valid():\n with transaction.atomic():\n tags = obj.cleaned_data.pop('tags')\n content = obj.cleaned_data.pop('content')\n obj.cleaned_data['blog_id'] = 
request.session.get('user')['pb__id']\n aid = BlogArticle.objects.create(**obj.cleaned_data)\n BlogArticleDetail.objects.create(content=content, bid=aid)\n tag_list = []\n for tag_id in tags:\n tag_id = int(tag_id)\n tag_list.append(TagsToArticle(article_id=aid.id, tag_id=tag_id))\n TagsToArticle.objects.bulk_create(tag_list)\n return redirect('/backend/article-0-0')\n else:\n return render(request, 'backend/article/backend_add_article.html', {'obj': obj})\n else:\n return redirect('/')\n\n\n@check_login\ndef del_article(request):\n ret = {'status': False, 'message': None}\n aid = request.POST.get('aid')\n try:\n BlogArticle.objects.filter(id=aid).delete()\n BlogArticleDetail.objects.filter(bid=aid).delete()\n ret['status'] = True\n return JsonResponse(ret)\n except Exception as e:\n ret['message'] = e\n return JsonResponse(ret)\n\n\n@check_login\ndef edit_article(request, aid):\n bid = request.session.get('user')['pb__id']\n if request.method == 'GET':\n art = BlogArticle.objects.filter(blog_id=bid, id=aid).first()\n if not art:\n return render(request, 'backend/article/backend_no_article.html')\n content = BlogArticleDetail.objects.filter(bid=aid).first()\n if not content:\n BlogArticleDetail.objects.create(bid_id=art.id)\n content = BlogArticleDetail.objects.filter(bid=aid).first()\n tags = art.tags.values_list('id')\n if tags:\n tags = list(zip(*tags))[0]\n context = {\n 'aid': art.id,\n 'title': art.title,\n 'summary': art.summary,\n 'content': content.content,\n 'article_type_id': art.article_type_id,\n 'classify_id': art.classify_id,\n 'tags': tags,\n }\n obj = ArticleForm(request=request, initial=context)\n return render(request, 'backend/article/backend_edit_article.html', {'obj': obj, 'aid': aid})\n elif request.method == 'POST':\n obj = ArticleForm(request=request, data=request.POST)\n if obj.is_valid():\n art = BlogArticle.objects.filter(blog_id=bid, id=aid).first()\n if not art:\n return render(request, 'backend/article/backend_no_article.html')\n with transaction.atomic():\n tags = obj.cleaned_data.pop('tags')\n content = obj.cleaned_data.pop('content')\n BlogArticle.objects.filter(blog_id=bid,id=aid).update(**obj.cleaned_data)\n BlogArticleDetail.objects.filter(bid=aid).update(content=content)\n TagsToArticle.objects.filter(article=aid).delete()\n tag_list = []\n for tag_id in tags:\n tag_id = int(tag_id)\n tag_list.append(TagsToArticle(article_id=aid, tag_id=tag_id))\n TagsToArticle.objects.bulk_create(tag_list)\n return redirect('/backend/article-0-0')\n else:\n return render(request, 'backend/article/backend_add_article.html', {'obj': obj})\n else:\n return redirect('/')\n", "sub_path": "backend/view/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 5295, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "backend.auth.check_login", "line_number": 10, "usage_type": "name"}, {"api_name": "utils.Pagination.Pager2.Pagination", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.Pagination.Pager2", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "backend.auth.check_login", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 57, "usage_type": "call"}, {"api_name": 
"django.db.transaction", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "backend.auth.check_login", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 86, "usage_type": "call"}, {"api_name": "backend.auth.check_login", "line_number": 75, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 120, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 120, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "backend.auth.check_login", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "581233092", "text": "from methods.get_all_user_info import get_all_user_info\nimport tornado.web\nfrom ..basehandler import BaseHandler\nfrom methods.ormoperator import OrmOperator\nfrom models.auth import User,UserRole\n\nclass DelUserHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n user_info = get_all_user_info()\n for user in user_info:\n user[2] = '【'+'】,【'.join(user[2].split(','))+'】'\n self.render('admin/deluser.html',user_info=user_info)\n def post(self):\n worker_id = self.get_argument('delete')\n oo_ur = OrmOperator(UserRole)\n if 'admin' in oo_ur.query_all('role_name',worker_id=worker_id):\n self.write('''\n
无法删除管理员,将跳转到之前页
''')\n else:\n oo_u = OrmOperator(User)\n oo_u.delete(worker_id=worker_id)\n oo_ur = OrmOperator(UserRole)\n oo_ur.delete(worker_id=worker_id)\n self.write('''''')", "sub_path": "handlers/admin/deluser.py", "file_name": "deluser.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "basehandler.BaseHandler", "line_number": 7, "usage_type": "name"}, {"api_name": "methods.get_all_user_info.get_all_user_info", "line_number": 10, "usage_type": "call"}, {"api_name": "tornado.web.web", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 8, "usage_type": "name"}, {"api_name": "methods.ormoperator.OrmOperator", "line_number": 16, "usage_type": "call"}, {"api_name": "models.auth.UserRole", "line_number": 16, "usage_type": "argument"}, {"api_name": "methods.ormoperator.OrmOperator", "line_number": 21, "usage_type": "call"}, {"api_name": "models.auth.User", "line_number": 21, "usage_type": "argument"}, {"api_name": "methods.ormoperator.OrmOperator", "line_number": 23, "usage_type": "call"}, {"api_name": "models.auth.UserRole", "line_number": 23, "usage_type": "argument"}]} +{"seq_id": "251282373", "text": "\"\"\"instituteproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\n\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom instituteapp import views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$',views.home),\n url(r'^home/',views.home),\n url(r'^contact/',views.contact),\n url(r'^services/',views.services),\n url(r'^feedback/',views.feedback),\n url(r'^gallery/',views.gallery)\n]\n", "sub_path": "instituteproject/instituteproject/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "instituteapp.views.home", "line_number": 14, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "instituteapp.views.home", "line_number": 15, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "instituteapp.views.contact", "line_number": 16, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "instituteapp.views.services", "line_number": 17, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "instituteapp.views.feedback", "line_number": 18, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 18, "usage_type": "name"}, {"api_name": 
"django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "instituteapp.views.gallery", "line_number": 19, "usage_type": "attribute"}, {"api_name": "instituteapp.views", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "572162483", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 30 09:22:56 2020\r\n\r\n@author: elpea\r\n\"\"\"\r\n\r\nfrom keras.models import load_model\r\nimport numpy as np\r\nimport cv2 \r\n\r\n\r\nmodel = load_model('model_34_0.9720.h5')\r\n\r\nimg_size = 500\r\ntest_image = cv2.imread('IMG_3143.JPG',cv2.IMREAD_GRAYSCALE)\r\ntest_image = cv2.resize(test_image,(img_size,img_size))\r\n\r\ntest_image = test_image.reshape(1, img_size, img_size, 1)\r\n\r\n#predict the result\r\nresult = model.predict(test_image)\r\nCATEGORIES = [\"Buff Orpington\",\"Rhode Island Red\",\"Silver Laced Wyandotte\",\"White Leghorn\"]\r\nresult = list(result[0,:])\r\ncat = np.argmax(result)\r\nprint(CATEGORIES[cat], ':', 100 * float(result[cat]))\r\ndel CATEGORIES[cat]\r\ndel result[cat]\r\ncat2 = np.argmax(result)\r\nprint(CATEGORIES[cat2], ':', 100*float(result[cat2]))\r\ndel CATEGORIES[cat2]\r\ndel result[cat2]\r\ncat3 = np.argmax(result)\r\nprint(CATEGORIES[cat3], ':', 100*float(result[cat3]))", "sub_path": "keras/Classifier 10/predictor_10.py", "file_name": "predictor_10.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "keras.models.load_model", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "373458423", "text": "import urllib\r\n\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.models import User, UserManager\r\nfrom django.core.urlresolvers import reverse\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\nfrom django.shortcuts import render_to_response, get_object_or_404, get_list_or_404\r\nfrom django.template import RequestContext, Context, loader\r\nfrom django.utils import simplejson\r\n\r\nfrom haystack.forms import SearchForm\r\nfrom haystack.query import EmptySearchQuerySet\r\n\r\nfrom epic.core.models import Item\r\nfrom epic.core.util.view_utils import *\r\nfrom epic.datarequests.models import DataRequest\r\nfrom epic.datasets.models import DataSet\r\nfrom epic.projects.models import Project\r\nfrom epic.projects.util.util import *\r\nfrom epic.search import perform_search\r\nfrom epic.search import perform_search_for_item\r\n\r\n\r\ndef get_search(request):\r\n if 'q' in request.GET and request.GET.get('q'):\r\n return HttpResponseRedirect(reverse(\r\n 'all-query-search', kwargs={'query': request.GET.get('q')}))\r\n else:\r\n return HttpResponseRedirect(reverse('all-empty-search'))\r\n\r\ndef search_all(request, query=None):\r\n display_form = SearchForm(initial={'q': query,}, load_all=True)\r\n\r\n if query:\r\n data_requests = perform_search_for_item('DataRequest', query)[:3]\r\n datasets = perform_search_for_item('DataSet', query)[:3]\r\n projects = perform_search_for_item('Project', query)[:3]\r\n template_objects = {\r\n 
'data_requests': data_requests, 'datasets': datasets, 'projects': projects,\r\n }\r\n else:\r\n template_objects = {}\r\n\r\n return generic_search(request, query, template_objects, 'search/search_all.html')\r\n\r\ndef search_data_requests(request, query=None):\r\n display_form = SearchForm(initial={'q': query,}, load_all=True)\r\n\r\n if query is not None and query != '':\r\n data_requests_page = paginate(perform_search_for_item('DataRequest', query), request.GET)\r\n template_objects = {'data_requests_page': data_requests_page,}\r\n else:\r\n template_objects = {}\r\n\r\n return generic_search(request, query, template_objects, 'search/search_data_requests.html')\r\n\r\ndef search_datasets(request, query=None):\r\n display_form = SearchForm(initial={'q': query,}, load_all=True)\r\n\r\n if query is not None and query != '':\r\n datasets_page = paginate(perform_search_for_item('DataSet', query), request.GET)\r\n template_objects = {'datasets_page': datasets_page,}\r\n else:\r\n template_objects = {}\r\n\r\n return generic_search(request, query, template_objects, 'search/search_datasets.html')\r\n\r\ndef search_projects(request, query=None):\r\n display_form = SearchForm(initial={'q': query,}, load_all=True)\r\n\r\n if query is not None and query != '':\r\n projects_page = paginate(perform_search_for_item('Project', query), request.GET)\r\n template_objects = {'projects_page': projects_page,}\r\n else:\r\n template_objects = {}\r\n\r\n return generic_search(request, query, template_objects, 'search/search_projects.html')\r\n\r\ndef generic_search(request, query, template_objects, template):\r\n display_form = SearchForm(initial={'q': query,}, load_all=True)\r\n\r\n if query is not None and query != '':\r\n render_to_response_data = {'form': display_form, 'query': query}\r\n\r\n for key in template_objects:\r\n render_to_response_data[key] = template_objects[key]\r\n\r\n return render_to_response(\r\n template, render_to_response_data, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\r\n template,\r\n {'form': display_form, 'query': query,},\r\n context_instance=RequestContext(request))\r\n", "sub_path": "branches/epic/2011-07-07_test-categories-migration-first/search/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3764, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.http.HttpResponseRedirect", "line_number": 26, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 29, "usage_type": "call"}, {"api_name": "haystack.forms.SearchForm", "line_number": 32, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 35, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 36, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 37, "usage_type": "call"}, {"api_name": "haystack.forms.SearchForm", "line_number": 47, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 50, "usage_type": "call"}, {"api_name": "haystack.forms.SearchForm", "line_number": 58, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 61, "usage_type": "call"}, {"api_name": 
"haystack.forms.SearchForm", "line_number": 69, "usage_type": "call"}, {"api_name": "epic.search.perform_search_for_item", "line_number": 72, "usage_type": "call"}, {"api_name": "haystack.forms.SearchForm", "line_number": 80, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 88, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 91, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "303870571", "text": "from sklearn.linear_model import Lasso\n#import functions needed to set up the dataset\nfrom prepare_data import create_testdata\nfrom preprocessing import preprocessing\n\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, KFold\nfrom sklearn.pipeline import make_pipeline\nimport numpy as np\n\n#create datasets\ndf=preprocessing()\nfeatures_train,windspeed_train,features_test,windspeed_test=create_testdata(df,'200')\n\nkfold=10 #default 10-fold cross validation\n\nreg = Lasso(alpha=0.1)\n# specify parameters to compute grid search on the the parameter scoring function\nparam_dist = {\"alpha\": [1e-3, 1e-2, 1e-1, 1]}\n\n# Choose cross-validation techniques for the inner and outer loops,\n# independently of the dataset.\n# E.g \"GroupKFold\", \"LeaveOneOut\", \"LeaveOneGroupOut\", etc.\ninner_cv = KFold(n_splits=kfold, shuffle=True, random_state=1)\nouter_cv = KFold(n_splits=kfold, shuffle=True, random_state=1)\n\nclf = GridSearchCV(estimator=reg, param_grid=param_dist,scoring='neg_mean_squared_error', cv=inner_cv)\n# Nested CV with parameter optimization\nnested_score = cross_val_score(clf, features_train, windspeed_train, scoring='neg_mean_squared_error',cv=outer_cv)\n\nclf.fit(features_train,windspeed_train)\n\nprint(\"Best model has mse of {} with standard deviation {}.\".format(nested_score.mean(),nested_score.std()))\nprint(clf.best_params_)\n\n#now predict the windspeed for test set and output final error (mse)\n\ny_predict=clf.predict(features_test)\nfinal_error=-np.sum(np.square(y_predict-windspeed_test))/len(windspeed_test)\nprint('mse error on test set is {}.'.format(final_error))\n", "sub_path": "Regression Algorithms/lassoregression.py", "file_name": "lassoregression.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "preprocessing.preprocessing", "line_number": 11, "usage_type": "call"}, {"api_name": "prepare_data.create_testdata", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "529047313", "text": "# 1 导入库\nimport dlib\nimport cv2\nimport matplotlib.pyplot as plt\n# 2 方法:人脸绘制矩形框\ndef plot_rectangle(image,faces):\n for face in faces:\n 
cv2.rectangle(image,(face.left(),face.top()),(face.right(),face.bottom()),(255,0,0),3)\n return image\n# 3 主函数\ndef main():\n # 4 打开摄像头,读取视频\n image=[]\n captrue = cv2.VideoCapture(0)\n # 5 判断摄像头是否工作\n if captrue.isOpened() is False:\n print(\"Camera Error\")\n # 6 读取每一帧\n while True:\n ret,frame = captrue.read()\n if ret:\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n # 7 获取检测器(dlib)\n detector = dlib.get_frontal_face_detector()\n dets_result = detector(gray, 1)\n # 8 绘制结果\n dets_image = plot_rectangle(frame,dets_result)\n # 9 显示结果\n cv2.imshow(\"face detection with dlib\",dets_image)\n # 10 按键退出\n if cv2.waitKey(1) == 27:\n image=dets_image\n break\n\n # 11 释放资源\n captrue.release()\n cv2.destroyAllWindows()\n show_image(dets_image,'dsb')\n plt.show()\ndef show_image(image,title):\n img_RGB = image[:,:,::-1]\n plt.title(title)\n plt.imshow(img_RGB)\n plt.axis(\"off\")\nif __name__ == '__main__':\n main()", "sub_path": "OpenCV/face_detection_dlib/face_detection_dlib_video.py", "file_name": "face_detection_dlib_video.py", "file_ext": "py", "file_size_in_byte": 1356, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.rectangle", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "dlib.get_frontal_face_detector", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "419448965", "text": "import cassiopeia.type.dto.common\r\nimport cassiopeia.type.core.common\r\n\r\n\r\nif cassiopeia.type.dto.common.sqlalchemy_imported:\r\n import sqlalchemy\r\n import sqlalchemy.orm\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass TournamentCodeParameters(cassiopeia.type.dto.common.CassiopeiaParametersDto):\r\n \"\"\"\r\n teamSize int the team size of the game. Valid values are 1-5.\r\n spectatorType str the spectator type of the game. Valid values are NONE, LOBBYONLY, ALL.\r\n pickType str the pick type of the game. Valid values are BLIND_PICK, DRAFT_MODE, ALL_RANDOM, TOURNAMENT_DRAFT.\r\n mapType str the map type of the game. Valid values are SUMMONERS_RIFT, TWISTED_TREELINE, CRYSTAL_SCAR, and HOWLING_ABYSS.\r\n allowedSummonerIds SummonerIdParams optional list of participants in order to validate the players eligible to join the lobby. NOTE: We currently do not enforce participants at the team level, but rather the aggregate of teamOne and teamTwo. 
We may add the ability to enforce at the team level in the future.\r\n metadata str optional string that may contain any data in any format, if specified at all. Used to denote any custom information about the game.\r\n \"\"\"\r\n def __init__(self, teamSize, spectatorType, pickType, mapType, allowedSummonerIds=None, metadata=\"\"):\r\n self.teamSize = teamSize\r\n self.spectatorType = spectatorType\r\n self.pickType = pickType\r\n self.mapType = mapType\r\n self.allowedSummonerIds = allowedSummonerIds\r\n self.metadata = metadata\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass SummonerIdParams(cassiopeia.type.dto.common.CassiopeiaParametersDto):\r\n \"\"\"\r\n participants list the tournament participants\r\n \"\"\"\r\n def __init__(self, participants):\r\n self.participants = participants\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass TournamentCode(cassiopeia.type.dto.common.CassiopeiaDto):\r\n \"\"\"\r\n code str the tournament code\r\n id int the tournament code's ID\r\n lobbyName str the lobby name for the tournament code game\r\n map str the game map for the tournament code game\r\n metaData str the metadata for tournament code\r\n participants list the IDs of the summoners participating in the tournament\r\n password str the password for the tournament code game\r\n pickType str the pick mode for tournament code game\r\n providerId int the provider's ID\r\n region str the tournament code's region (Legal values: BR, EUNE, EUW, JP, KR, LAN, LAS, NA, OCE, PBE, RU, TR)\r\n spectators str the spectator mode for the tournament code game\r\n teamSize int the team size for the tournament code game\r\n tournamentId int the tournament's ID\r\n \"\"\"\r\n def __init__(self, dictionary):\r\n self.code = dictionary.get(\"code\", \"\")\r\n self.id = dictionary.get(\"id\", 0)\r\n self.lobbyName = dictionary.get(\"lobbyName\", \"\")\r\n self.map = dictionary.get(\"map\", \"\")\r\n self.metaData = dictionary.get(\"metaData\", \"\")\r\n self.participants = dictionary.get(\"participants\", [])\r\n self.password = dictionary.get(\"password\", \"\")\r\n self.pickType = dictionary.get(\"pickType\", \"\")\r\n self.providerId = dictionary.get(\"providerId\", 0)\r\n self.region = dictionary.get(\"region\", \"\")\r\n self.spectators = dictionary.get(\"spectators\", \"\")\r\n self.teamSize = dictionary.get(\"teamSize\", 0)\r\n self.tournamentId = dictionary.get(\"tournamentId\", 0)\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass TournamentCodeUpdateParameters(cassiopeia.type.dto.common.CassiopeiaParametersDto):\r\n \"\"\"\r\n allowedParticipants str comma separated list of summoner Ids\r\n spectatorType str the spectator type (Legal values: NONE, LOBBYONLY, ALL)\r\n pickType str the pick type (Legal values: BLIND_PICK, DRAFT_MODE, ALL_RANDOM, TOURNAMENT_DRAFT)\r\n mapType str the map type (Legal values: SUMMONERS_RIFT, CRYSTAL_SCAR, TWISTED_TREELINE, HOWLING_ABYSS)\r\n \"\"\"\r\n def __init__(self, allowedParticipants=\"\", spectatorType=\"\", pickType=\"\", mapType=\"\"):\r\n self.allowedParticipants = allowedParticipants\r\n self.spectatorType = spectatorType\r\n self.pickType = pickType\r\n self.mapType = mapType\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass LobbyEventWrapper(cassiopeia.type.dto.common.CassiopeiaDto):\r\n \"\"\"\r\n eventList list the list of events\r\n \"\"\"\r\n def __init__(self, dictionary):\r\n self.eventList = [(LobbyEvent(event) if not isinstance(event, LobbyEvent) else event) for event in dictionary.get(\"eventList\", []) if 
event]\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass LobbyEvent(cassiopeia.type.dto.common.CassiopeiaDto):\r\n \"\"\"\r\n eventType str the type of event that was triggered\r\n summonerId str the summoner that triggered the event\r\n timestamp str timestamp from the event\r\n \"\"\"\r\n def __init__(self, dictionary):\r\n self.eventType = dictionary.get(\"eventType\", \"\")\r\n self.summonerId = dictionary.get(\"summonerId\", \"\")\r\n self.timestamp = dictionary.get(\"timestamp\", \"\")\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass ProviderRegistrationParameters(cassiopeia.type.dto.common.CassiopeiaParametersDto):\r\n \"\"\"\r\n region str the region in which the provider will be running tournaments (Legal values: BR, EUNE, EUW, JP, KR, LAN, LAS, NA, OCE, PBE, RU, TR)\r\n url str the provider's callback URL to which tournament game results in this region should be posted. The URL must be well-formed, use the http or https protocol, and use the default port for the protocol (http URLs must use port 80, https URLs must use port 443).\r\n \"\"\"\r\n def __init__(self, region, url):\r\n self.region = region\r\n self.url = url\r\n\r\n\r\n@cassiopeia.type.core.common.inheritdocs\r\nclass TournamentRegistrationParameters(cassiopeia.type.dto.common.CassiopeiaParametersDto):\r\n \"\"\"\r\n providerId int the provider ID to specify the regional registered provider data to associate this tournament\r\n name str the optional name of the tournament\r\n \"\"\"\r\n def __init__(self, providerId, name=\"\"):\r\n self.providerId = providerId\r\n self.name = name\r\n\r\n\r\n###############################\r\n# Dynamic SQLAlchemy bindings #\r\n###############################\r\ndef _sa_bind_tournament_code():\r\n global TournamentCode\r\n\r\n @cassiopeia.type.core.common.inheritdocs\r\n class TournamentCode(TournamentCode, cassiopeia.type.dto.common.BaseDB):\r\n __tablename__ = \"TournamentCode\"\r\n code = sqlalchemy.Column(sqlalchemy.String(50))\r\n id = sqlalchemy.Column(sqlalchemy.BigInteger)\r\n lobbyName = sqlalchemy.Column(sqlalchemy.String(50))\r\n map = sqlalchemy.Column(sqlalchemy.String(30))\r\n metaData = sqlalchemy.Column(sqlalchemy.Text)\r\n participants = sqlalchemy.Column(cassiopeia.type.dto.common.JSONEncoded)\r\n password = sqlalchemy.Column(sqlalchemy.String(30))\r\n pickType = sqlalchemy.Column(sqlalchemy.String(30))\r\n providerId = sqlalchemy.Column(sqlalchemy.Integer)\r\n region = sqlalchemy.Column(sqlalchemy.String(30))\r\n spectators = sqlalchemy.Column(sqlalchemy.String(30))\r\n teamSize = sqlalchemy.Column(sqlalchemy.Integer)\r\n tournamentId = sqlalchemy.Column(sqlalchemy.BigInteger)\r\n\r\n\r\ndef _sa_bind_lobby_event():\r\n global LobbyEvent\r\n\r\n @cassiopeia.type.core.common.inheritdocs\r\n class LobbyEvent(LobbyEvent, cassiopeia.type.dto.common.BaseDB):\r\n __tablename__ = \"LobbyEvent\"\r\n eventType = sqlalchemy.Column(sqlalchemy.String(50))\r\n summonerId = sqlalchemy.Column(sqlalchemy.String(30))\r\n timestamp = sqlalchemy.Column(sqlalchemy.String(50))\r\n\r\n\r\ndef _sa_bind_all():\r\n _sa_bind_tournament_code()\r\n _sa_bind_lobby_event()\r\n", "sub_path": "api/cassiopeia/cassiopeia/type/dto/tournament.py", "file_name": "tournament.py", "file_ext": "py", "file_size_in_byte": 8266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cassiopeia.type.dto.common.type", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 
5, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 11, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 10, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 30, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 29, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 39, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 38, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 72, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 71, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 87, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 86, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 96, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 96, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 95, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 95, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 109, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 108, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 120, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 120, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 119, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 119, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 137, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 139, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 139, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 140, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 141, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 141, 
"usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 142, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 142, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 143, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 143, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 144, "usage_type": "call"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 144, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 144, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 145, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 145, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 146, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 146, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 147, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 148, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 148, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 149, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 149, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 150, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 151, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 136, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 136, "usage_type": "name"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 158, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 160, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 160, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 161, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 161, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 162, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 162, "usage_type": "call"}, {"api_name": "cassiopeia.type.dto.common.type", "line_number": 157, "usage_type": "attribute"}, {"api_name": "cassiopeia.type.dto.common", "line_number": 157, "usage_type": "name"}]} +{"seq_id": "472178259", "text": "# ### Models Building\n# \n# Adapting attributes and label to prediction models and performing trainnings. 
\n\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\ndata = pd.read_csv('modified_hotelbookings.csv')\ndata.head(5)\n\n#separating attributes and labels\nlabel = data.iloc[:, 1].values\nattributes_data = data.drop('is_canceled', axis=1)\nattributes_data.head(5)\n\nattributes_data.info()\n\n#converting the collumns 0,2,10,11,12,13,17,18,20,22,26\nattributes = attributes_data.values\n\nlabel_encoder = LabelEncoder()\nattributes[:, 0] = label_encoder.fit_transform(attributes[:, 0])\nattributes[:, 2] = label_encoder.fit_transform(attributes[:, 2])\nattributes[:, 10] = label_encoder.fit_transform(attributes[:, 10])\nattributes[:, 11] = label_encoder.fit_transform(attributes[:, 11])\nattributes[:, 12] = label_encoder.fit_transform(attributes[:, 12])\nattributes[:, 13] = label_encoder.fit_transform(attributes[:, 13])\nattributes[:, 17] = label_encoder.fit_transform(attributes[:, 17])\nattributes[:, 18] = label_encoder.fit_transform(attributes[:, 18])\nattributes[:, 20] = label_encoder.fit_transform(attributes[:, 20])\nattributes[:, 22] = label_encoder.fit_transform(attributes[:, 22])\nattributes[:, 26] = label_encoder.fit_transform(attributes[:, 26])\n\nprint('Verifying if there are only numbers in attributes:\\n')\nprint(attributes[0:5])\nprint('\\nAttributes lenght: ', + len(attributes))\nprint('Labels lenght: ', + len(label))\n\n#importing resources and reports\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\n\n#split train x test\nX_train, X_test, y_train, y_test = train_test_split(attributes, label, test_size=0.3, random_state=0)\n\n#function for model training and report\ndef model_report(model):\n model.fit(X_train, y_train)\n predicts = model.predict(X_test)\n cm = confusion_matrix(y_test, predicts)\n acc = accuracy_score(y_test, predicts)\n cr = classification_report(y_test, predicts)\n print('Confusion Matrix:\\n')\n print(cm)\n print('\\nAccuracy: ', + acc*100)\n print('\\nClassification Report:')\n print(cr)\n\n\n# #### Decision Tree Classifier\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n#training decision tree classifier\ndtc = DecisionTreeClassifier()\nmodel_report(dtc)\n\n\n# #### Logistic Regression\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression()\nmodel_report(lr)\n\n\n# #### Naive Bayes (GaussianNB)\n\nfrom sklearn.naive_bayes import GaussianNB\n\nnb = GaussianNB()\nmodel_report(nb)", "sub_path": "scripts/models_building.py", "file_name": "models_building.py", "file_ext": "py", "file_size_in_byte": 2544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 81, "usage_type": "call"}]} 
+{"seq_id": "468721690", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (C) 2014 University of Dundee & Open Microscopy Environment.\n# All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n\"\"\"\nImplementation of the OMERO.features AbstractAPI\n\"\"\"\n\nfrom AbstractAPI import (\n AbstractFeatureRow, AbstractFeatureStore, AbstractFeatureStoreManager)\nimport omero\nimport omero.clients\nfrom omero.rtypes import unwrap, wrap\n\nimport itertools\nimport re\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nDEFAULT_NAMESPACE = 'omero.features/0.1'\nDEFAULT_FEATURE_SUBSPACE = 'features'\nDEFAULT_ANNOTATION_SUBSPACE = 'source'\n\nFEATURE_NAME_RE = r'^[A-Za-z0-9][A-Za-z0-9_ \\-\\(\\)\\[\\]\\{\\}\\.]*$'\n\n# Indicates the object ID is unknown\nNOID = -1\n\n\nclass TableStoreException(Exception):\n \"\"\"\n Parent class for exceptions occuring in the OMERO.features tables store\n implementation\n \"\"\"\n pass\n\n\nclass OmeroTableException(TableStoreException):\n \"\"\"\n Errors whilst using the OMERO.tables API\n \"\"\"\n pass\n\n\nclass NoTableMatchException(TableStoreException):\n \"\"\"\n No matching annotation was found when searching for a table\n \"\"\"\n pass\n\n\nclass TooManyTablesException(TableStoreException):\n \"\"\"\n Too many matching annotation were found when searching for a table\n \"\"\"\n pass\n\n\nclass TableUsageException(TableStoreException):\n \"\"\"\n Invalid usage of this implementation of the Features API\n \"\"\"\n pass\n\n\nclass FeaturePermissionException(TableStoreException):\n \"\"\"\n Client does not have permission to access a feature table\n \"\"\"\n pass\n\n\nclass FeatureRowException(TableStoreException):\n \"\"\"\n Errors in a FeatureRow object\n \"\"\"\n pass\n\n\nclass FeatureRow(AbstractFeatureRow):\n\n def __init__(self, names=None, values=None,\n infonames=None, infovalues=None):\n if not names and not values:\n raise FeatureRowException(\n 'At least one of names or values must be provided')\n\n if names and values and len(names) != len(values):\n raise FeatureRowException(\n 'names and values must have the same number of elements')\n self._names = names\n\n self._values = None\n if values:\n self.values = values\n\n self._infonames = infonames\n self._infovalues = None\n if infovalues:\n self.infovalues = infovalues\n\n self._namemap = {}\n self._infonamemap = {}\n\n def _get_index(self, name):\n try:\n return self._namemap[name], False\n except KeyError:\n pass\n try:\n return self._infonamemap[name], True\n except KeyError:\n pass\n\n if self._names and not self._namemap:\n self._namemap = dict(ni for ni in zip(\n self._names, xrange(len(self._names))))\n if self._infonames and not self._infonamemap:\n self._infonamemap = dict(ni for ni in zip(\n self._infonames, xrange(len(self._infonames))))\n try:\n return self._namemap[name], False\n 
except KeyError:\n return self._infonamemap[name], True\n\n def __getitem__(self, key):\n i, m = self._get_index(key)\n if m:\n return self.infovalues[i]\n return self.values[i]\n\n def __setitem__(self, key, value):\n i, m = self._get_index(key)\n if m:\n self.infovalues[i] = value\n else:\n self.values[i] = value\n\n @property\n def names(self):\n return self._names\n\n @property\n def values(self):\n return self._values\n\n @values.setter\n def values(self, value):\n if self._names:\n w = len(self._names)\n elif self._values:\n w = len(self._values)\n else:\n w = len(value)\n if len(value) != w:\n raise FeatureRowException(\n 'Expected %d elements, received %d' % (w, len(value)))\n self._values = value\n\n @values.deleter\n def values(self):\n del self._values\n\n @property\n def infonames(self):\n return self._infonames\n\n @property\n def infovalues(self):\n return self._infovalues\n\n @infovalues.setter\n def infovalues(self, value):\n if self._infonames and len(self._infonames) != len(value):\n raise FeatureRowException(\n 'Expected %d elements, received %d' % (\n len(self._infonames), len(value)))\n self._infovalues = value\n\n @infovalues.deleter\n def infovalues(self):\n del self._infovalues\n\n def __repr__(self):\n return (\n '%s(names=%r, values=%r, infonames=%r, infovalues=%r)' %\n (self.__class__.__name__, self._names, self._values,\n self._infonames, self._infovalues))\n\n\nclass PermissionsHandler(object):\n \"\"\"\n Handles permissions checks on objects handled by OMERO.features.\n\n These are stricter than the OMERO model: only owners are allowed to\n write or edit objects. Annotation permissions are as standard.\n \"\"\"\n\n def __init__(self, session):\n self.context = session.getAdminService().getEventContext()\n\n def get_userid(self):\n return self.context.userId\n\n def can_annotate(self, obj):\n p = obj.getDetails().getPermissions()\n return p.canAnnotate()\n\n def can_edit(self, obj):\n d = obj.getDetails()\n return (self.get_userid() == unwrap(d.getOwner().id) and\n d.getPermissions().canEdit())\n\n\nclass FeatureTable(AbstractFeatureStore):\n \"\"\"\n A feature store.\n Each row is an Image-ID, Roi-ID and a single fixed-width DoubleArray\n \"\"\"\n\n def __init__(self, session, name, ft_space, ann_space, ownerid,\n coldesc=None):\n self.session = session\n self.perms = PermissionsHandler(session)\n self.name = name\n self.ft_space = ft_space\n self.ann_space = ann_space\n self.cols = None\n self.table = None\n self.ftnames = None\n self.chunk_size = None\n self.get_table(ownerid, coldesc=coldesc)\n\n def _owns_table(func):\n def assert_owns_table(*args, **kwargs):\n self = args[0]\n if not self.perms.can_edit(self.table.getOriginalFile()):\n raise FeaturePermissionException(\n 'Feature table must be owned by the current user')\n return func(*args, **kwargs)\n return assert_owns_table\n\n def close(self):\n \"\"\"\n Close the table\n \"\"\"\n if self.table:\n self.table.close()\n self.table = None\n self.cols = None\n self.ftnames = None\n\n def get_table(self, ownerid, coldesc=None):\n \"\"\"\n Get the table using the parameters specified during initialisation\n\n :param ownerid: The user-ID of the owner of the table file\n :param coldesc: If provided a new table will be created and\n initialised with this list of feature names, default None\n (table must already exist)\n \"\"\"\n tablepath = self.ft_space + '/' + self.name\n if self.table:\n if coldesc:\n raise TableUsageException(\n 'New table requested but already open: %s' % tablepath)\n assert self.cols\n 
return self.table\n\n q = {'name': self.name, 'path': self.ft_space}\n if ownerid > -1:\n q['details.owner.id'] = ownerid\n tablefile = self.get_objects('OriginalFile', q)\n\n if coldesc:\n if tablefile:\n raise TooManyTablesException(\n 'Table file already exists: %s' % tablepath)\n if self.perms.get_userid() != ownerid:\n raise TableUsageException(\n 'Unable to create table for a different user')\n self.new_table(coldesc)\n else:\n if len(tablefile) < 1:\n raise NoTableMatchException(\n 'No files found for: %s' % tablepath)\n if len(tablefile) > 1:\n raise TooManyTablesException(\n 'Multiple files found for: %s' % tablepath)\n self.open_table(tablefile[0])\n return self.table\n\n def new_table(self, coldesc):\n \"\"\"\n Create a new table\n\n :param coldesc: A list of column names\n \"\"\"\n for n in coldesc:\n if not re.match(FEATURE_NAME_RE, n):\n raise TableUsageException('Invalid feature name: %s' % n)\n\n tablepath = self.ft_space + '/' + self.name\n self.table = self.session.sharedResources().newTable(0, tablepath)\n if not self.table:\n raise OmeroTableException(\n 'Failed to create table: %s' % tablepath)\n # Name may not be split into dirname (path) and basename (name)\n # components https://trac.openmicroscopy.org.uk/ome/ticket/12576\n tof = self.table.getOriginalFile()\n tid = unwrap(tof.getId())\n if (unwrap(tof.getPath()) != self.ft_space or\n unwrap(tof.getName()) != self.name):\n log.warn('Overriding table path and name')\n tof.setPath(wrap(self.ft_space))\n tof.setName(wrap(self.name))\n tof = self.session.getUpdateService().saveAndReturnObject(tof)\n\n # Note table.getOriginalFile will still return the old object.\n # Force a reload by re-opening table to avoid sync errors when\n # storing data.\n self.table.close()\n self.table = self.session.sharedResources().openTable(tof)\n if not self.table:\n raise OmeroTableException('Failed to reopen table ID:%d' % tid)\n\n coldef = [\n omero.grid.ImageColumn('ImageID', ''),\n omero.grid.RoiColumn('RoiID', '')\n ]\n\n # We don't currently have a good way of storing individual feature\n # names for a DoubleArrayColumn:\n # - The number of DoubleColumns allowed in a table is limited (and\n # slow)\n # - Tables.setMetadata is broken\n # https://trac.openmicroscopy.org.uk/ome/ticket/12606\n # - Column descriptions can't be retrieved through the API\n # - The total size of table attributes is limited to around 64K (not\n # sure if this is a per-attribute/object/table limitation)\n # For now save the feature names into the column name.\n names = ','.join(coldesc)\n if len(names) > 64000:\n log.warn(\n 'Feature names may exceed the limit of the current Tables API')\n coldef.append(omero.grid.DoubleArrayColumn(\n names, '', len(coldesc)))\n\n try:\n self.table.initialize(coldef)\n except omero.InternalException:\n log.error('Failed to initialize table, deleting: %d', tid)\n self.session.getUpdateService().deleteObject(tof)\n raise\n self.cols = self.table.getHeaders()\n if not self.cols:\n raise OmeroTableException(\n 'Failed to get columns for table ID:%d' % tid)\n\n def open_table(self, tablefile):\n \"\"\"\n Open an existing table\n\n :param tablefile: An OriginalFile\n \"\"\"\n tid = unwrap(tablefile.getId())\n self.table = self.session.sharedResources().openTable(tablefile)\n if not self.table:\n raise OmeroTableException('Failed to open table ID:%d' % tid)\n self.cols = self.table.getHeaders()\n if not self.cols:\n raise OmeroTableException(\n 'Failed to get columns for table ID:%d' % tid)\n\n def feature_names(self):\n \"\"\"\n 
Get the list of feature names\n \"\"\"\n if not self.ftnames:\n self.ftnames = self.cols[2].name.split(',')\n assert len(self.ftnames) == self.cols[2].size\n return self.ftnames\n\n def store_by_image(self, image_id, values):\n self.store_by_object('Image', long(image_id), values)\n\n def store_by_roi(self, roi_id, values, image_id=None):\n if image_id is None:\n params = omero.sys.ParametersI()\n params.addId(roi_id)\n image_id = self.session.getQueryService().projection(\n 'SELECT r.image.id FROM Roi r WHERE r.id=:id', params)\n try:\n image_id = unwrap(image_id[0][0])\n except IndexError:\n raise TableUsageException('No image found for Roi: %d', roi_id)\n if image_id < 0:\n self.store_by_object('Roi', long(roi_id), values)\n else:\n self.store_by_object(\n 'Roi', long(roi_id), values, 'Image', image_id)\n\n @_owns_table\n def store_by_object(self, object_type, object_id, values,\n parent_type=None, parent_id=None, replace=True):\n \"\"\"\n Store a feature row\n\n :param object_type: The object directly associated with the features\n :param object_id: The object ID\n :param values: Feature values, an array of doubles\n :param parent_type: The parent type of the object, optional\n :param parent_id: The parent ID of the object\n :param replace: If True (default) replace existing rows with the same\n IDs\n \"\"\"\n image_id = NOID\n roi_id = NOID\n if object_type == 'Image':\n if parent_type:\n raise TableUsageException('Parent not supported for Image')\n image_id = object_id\n elif object_type == 'Roi':\n roi_id = object_id\n if parent_type:\n if parent_type == 'Image':\n image_id = parent_id\n else:\n raise TableUsageException(\n 'Invalid parent type: %s', parent_type)\n else:\n raise TableUsageException(\n 'Invalid object type: %s' % object_type)\n\n self.cols[0].values = [image_id]\n self.cols[1].values = [roi_id]\n\n offset = -1\n if replace:\n conditions = '(ImageID==%d) & (RoiID==%d)' % (\n self.cols[0].values[0], self.cols[1].values[0])\n offsets = self.table.getWhereList(\n conditions, {}, 0, self.table.getNumberOfRows(), 0)\n if offsets:\n offset = max(offsets)\n\n self.cols[2].values = [values]\n\n if offset > -1:\n data = omero.grid.Data(rowNumbers=[offset], columns=self.cols)\n self.table.update(data)\n else:\n self.table.addData(self.cols)\n\n if image_id > NOID:\n self.create_file_annotation('Image', image_id, self.ann_space,\n self.table.getOriginalFile())\n if roi_id > NOID:\n self.create_file_annotation('Roi', roi_id, self.ann_space,\n self.table.getOriginalFile())\n\n def fetch_by_image(self, image_id, last=False):\n values = self.fetch_by_object('Image', image_id)\n if len(values) > 1 and not last:\n raise TableUsageException(\n 'Multiple feature rows found for Image %d' % image_id)\n if not values:\n raise TableUsageException(\n 'No feature rows found for Image %d' % image_id)\n return self.feature_row(values[-1])\n\n def fetch_by_roi(self, roi_id, last=False):\n values = self.fetch_by_object('Roi', roi_id)\n if len(values) > 1 and not last:\n raise TableUsageException(\n 'Multiple feature rows found for Roi %d' % roi_id)\n if not values:\n raise TableUsageException(\n 'No feature rows found for Roi %d' % roi_id)\n return self.feature_row(values[-1])\n\n def fetch_all(self, image_id):\n values = self.fetch_by_object('Image', image_id)\n return [self.feature_row(v) for v in values]\n\n def filter(self, conditions):\n log.warn('The filter/query syntax is still under development')\n values = self.filter_raw(conditions)\n return [self.feature_row(v) for v in values]\n\n def 
fetch_by_object(self, object_type, object_id):\n \"\"\"\n Fetch all feature rows for an object\n\n :param object_type: The object type\n :param object_id: The object ID\n :return: A list of tuples (Image-ID, Roi-ID, feature-values)\n \"\"\"\n if object_type in ('Image', 'Roi'):\n cond = '(%sID==%d)' % (object_type, object_id)\n else:\n raise TableUsageException(\n 'Unsupported object type: %s' % object_type)\n return self.filter_raw(cond)\n\n def filter_raw(self, conditions):\n \"\"\"\n Query a feature table, return data as rows\n\n :param conditions: The query conditions\n Note the query syntax is still to be decided\n :return: A list of tuples (Image-ID, Roi-ID, feature-values)\n \"\"\"\n offsets = self.table.getWhereList(\n conditions, {}, 0, self.table.getNumberOfRows(), 0)\n values = self.chunked_table_read(offsets, self.get_chunk_size())\n\n # Convert into row-wise storage\n if not values:\n return []\n for v in values:\n assert len(offsets) == len(v)\n return zip(*values)\n\n def feature_row(self, values):\n \"\"\"\n Create a FeatureRow object\n\n :param values: The feature values\n \"\"\"\n return FeatureRow(\n names=self.feature_names(),\n infonames=[h.name for h in self.cols[:2]],\n values=values[2], infovalues=values[:2])\n\n def get_chunk_size(self):\n \"\"\"\n Ice has a maximum message size. Use a very rough heuristic to decide\n how many table rows to read in one go\n\n Assume only doubles are stored (8 bytes), and keep the table chunk size\n to <16MB\n \"\"\"\n if not self.chunk_size:\n # Use size for ArrayColumns, otherwise 1\n rowsize = sum(getattr(c, 'size', 1) for c in self.cols)\n self.chunk_size = max(16777216 / (rowsize * 8), 1)\n\n return self.chunk_size\n\n def chunked_table_read(self, offsets, chunk_size):\n \"\"\"\n Read part of a table in chunks to avoid the Ice maximum message size\n \"\"\"\n values = None\n\n log.info('Chunk size: %d', chunk_size)\n for n in xrange(0, len(offsets), chunk_size):\n log.info('Chunk offset: %d+%d', n, chunk_size)\n data = self.table.readCoordinates(offsets[n:(n + chunk_size)])\n if values is None:\n values = [c.values for c in data.columns]\n else:\n for c, v in itertools.izip(data.columns, values):\n v.extend(c.values)\n\n return values\n\n def get_objects(self, object_type, kvs):\n \"\"\"\n Retrieve OMERO objects\n \"\"\"\n params = omero.sys.ParametersI()\n\n qs = self.session.getQueryService()\n conditions = []\n\n for k, v in kvs.iteritems():\n ek = k.replace('_', '__').replace('.', '_')\n if isinstance(v, list):\n conditions.append(\n '%s in (:%s)' % (k, ek))\n else:\n conditions.append(\n '%s = :%s' % (k, ek))\n params.add(ek, wrap(v))\n\n q = 'FROM %s' % object_type\n if conditions:\n q += ' WHERE ' + ' AND '.join(conditions)\n\n results = qs.findAllByQuery(q, params)\n return results\n\n def create_file_annotation(self, object_type, object_id, ns, ofile):\n \"\"\"\n Create a file annotation\n\n :param object_type: The object type\n :param object_id: The object ID\n :param ns: The namespace\n :param ofile: The originalFile\n \"\"\"\n fid = unwrap(ofile.getId())\n links = self._file_annotation_exists(object_type, object_id, ns, fid)\n if len(links) > 1:\n log.warn('Multiple links found: ns:%s %s:%d file:%d',\n ns, object_type, object_id, fid)\n if links:\n return links[0]\n\n obj = self.get_objects(object_type, {'id': object_id})\n if len(obj) != 1:\n raise OmeroTableException(\n 'Failed to get object %s:%d' % (object_type, object_id))\n link = getattr(omero.model, '%sAnnotationLinkI' % object_type)()\n ann = 
omero.model.FileAnnotationI()\n ann.setNs(wrap(ns))\n ann.setFile(ofile)\n link.setParent(obj[0])\n link.setChild(ann)\n link = self.session.getUpdateService().saveAndReturnObject(link)\n return link\n\n def _file_annotation_exists(self, object_type, object_id, ns, file_id):\n q = ('FROM %sAnnotationLink ial WHERE ial.parent.id=:parent AND '\n 'ial.child.ns=:ns AND ial.child.file.id=:file') % object_type\n params = omero.sys.ParametersI()\n params.addLong('parent', object_id)\n params.addString('ns', ns)\n params.addLong('file', file_id)\n links = self.session.getQueryService().findAllByQuery(q, params)\n return links\n\n @_owns_table\n def delete(self):\n \"\"\"\n Delete the entire featureset including annotations\n \"\"\"\n # There's a bug (?) which means multiple FileAnnotations with the same\n # OriginalFile child can't be deleted using the graph spec methods.\n # For now just delete everything individually\n qs = self.session.getQueryService()\n tof = self.table.getOriginalFile()\n fid = unwrap(tof.getId())\n params = omero.sys.ParametersI()\n params.addId(fid)\n ds = []\n\n linktypes = self._get_annotation_link_types()\n for link in linktypes:\n r = qs.findAllByQuery(\n 'SELECT al FROM %s al WHERE al.child.file.id=:id' % link,\n params)\n ds.extend(r)\n\n r = qs.findAllByQuery(\n 'SELECT ann FROM FileAnnotation ann WHERE ann.file.id=:id', params)\n ds.extend(r)\n ds.append(tof)\n\n log.info('Deleting: %s',\n [(d.__class__.__name__, unwrap(d.getId())) for d in ds])\n\n us = self.session.getUpdateService()\n self.close()\n for d in ds:\n us.deleteObject(d)\n\n @staticmethod\n def _get_annotation_link_types():\n return [s for s in dir(omero.model) if s.endswith(\n 'AnnotationLink') and not s.startswith('_')]\n\n\nclass LRUCache(object):\n \"\"\"\n A naive least-recently-used cache. 
Removal is O(n)\n TODO: Improve efficiency\n \"\"\"\n\n def __init__(self, size):\n self.maxsize = size\n self.cache = {}\n self.counter = 0\n\n def __len__(self):\n return len(self.cache)\n\n def get(self, key, miss=None):\n try:\n v = self.cache[key]\n self.counter += 1\n v[1] = self.counter\n return v[0]\n except KeyError:\n return miss\n\n def insert(self, key, value):\n if key not in self.cache and len(self.cache) >= self.maxsize:\n self.remove_oldest()\n self.counter += 1\n self.cache[key] = [value, self.counter]\n\n def remove_oldest(self):\n mink, minv = min(self.cache.iteritems(), key=lambda kv: kv[1][1])\n return self.cache.pop(mink)[0]\n\n\nclass LRUClosableCache(LRUCache):\n \"\"\"\n Automatically call value.close() when an object is removed from the cache\n \"\"\"\n def remove_oldest(self):\n v = super(LRUClosableCache, self).remove_oldest()\n v.close()\n return v\n\n def close(self):\n while self.cache:\n log.debug('close, %s', self.cache)\n self.remove_oldest()\n\n\nclass FeatureTableManager(AbstractFeatureStoreManager):\n \"\"\"\n Manage storage of feature table files\n \"\"\"\n\n def __init__(self, session, **kwargs):\n self.session = session\n namespace = kwargs.get('namespace', DEFAULT_NAMESPACE)\n self.ft_space = kwargs.get(\n 'ft_space', namespace + '/' + DEFAULT_FEATURE_SUBSPACE)\n self.ann_space = kwargs.get(\n 'ann_space', namespace + '/' + DEFAULT_ANNOTATION_SUBSPACE)\n self.cachesize = kwargs.get('cachesize', 10)\n self.fss = LRUClosableCache(kwargs.get('cachesize', 10))\n\n def create(self, featureset_name, names):\n try:\n ownerid = self.session.getAdminService().getEventContext().userId\n fs = self.get(featureset_name, ownerid)\n if fs:\n raise TooManyTablesException(\n 'Featureset already exists: %s' % featureset_name)\n except NoTableMatchException:\n pass\n\n coldesc = names\n fs = FeatureTable(\n self.session, featureset_name, self.ft_space, self.ann_space,\n ownerid, coldesc)\n self.fss.insert((featureset_name, ownerid), fs)\n return fs\n\n def get(self, featureset_name, ownerid=None):\n if ownerid is None:\n ownerid = self.session.getAdminService().getEventContext().userId\n k = (featureset_name, ownerid)\n fs = self.fss.get(k)\n # If fs.table is None it has probably been closed\n if not fs or not fs.table:\n fs = FeatureTable(\n self.session, featureset_name, self.ft_space, self.ann_space,\n ownerid)\n # raises NoTableMatchException if not found\n self.fss.insert(k, fs)\n return fs\n\n def close(self):\n self.fss.close()\n", "sub_path": "features/OmeroTablesFeatureStore.py", "file_name": "OmeroTablesFeatureStore.py", "file_ext": "py", "file_size_in_byte": 26184, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "AbstractAPI.AbstractFeatureRow", "line_number": 99, "usage_type": "name"}, {"api_name": "omero.rtypes.unwrap", "line_number": 230, "usage_type": "call"}, {"api_name": "AbstractAPI.AbstractFeatureStore", "line_number": 234, "usage_type": "name"}, {"api_name": "re.match", "line_number": 319, "usage_type": "call"}, {"api_name": "omero.rtypes.unwrap", "line_number": 330, "usage_type": "call"}, {"api_name": "omero.rtypes.unwrap", "line_number": 331, "usage_type": "call"}, {"api_name": "omero.rtypes.unwrap", "line_number": 332, "usage_type": "call"}, {"api_name": "omero.rtypes.wrap", "line_number": 334, "usage_type": "call"}, {"api_name": "omero.rtypes.wrap", "line_number": 335, "usage_type": "call"}, 
{"api_name": "omero.grid.ImageColumn", "line_number": 347, "usage_type": "call"}, {"api_name": "omero.grid", "line_number": 347, "usage_type": "attribute"}, {"api_name": "omero.grid.RoiColumn", "line_number": 348, "usage_type": "call"}, {"api_name": "omero.grid", "line_number": 348, "usage_type": "attribute"}, {"api_name": "omero.grid.DoubleArrayColumn", "line_number": 365, "usage_type": "call"}, {"api_name": "omero.grid", "line_number": 365, "usage_type": "attribute"}, {"api_name": "omero.InternalException", "line_number": 370, "usage_type": "attribute"}, {"api_name": "omero.rtypes.unwrap", "line_number": 385, "usage_type": "call"}, {"api_name": "omero.sys.ParametersI", "line_number": 408, "usage_type": "call"}, {"api_name": "omero.sys", "line_number": 408, "usage_type": "attribute"}, {"api_name": "omero.rtypes.unwrap", "line_number": 413, "usage_type": "call"}, {"api_name": "omero.grid.Data", "line_number": 469, "usage_type": "call"}, {"api_name": "omero.grid", "line_number": 469, "usage_type": "attribute"}, {"api_name": "itertools.izip", "line_number": 583, "usage_type": "call"}, {"api_name": "omero.sys.ParametersI", "line_number": 592, "usage_type": "call"}, {"api_name": "omero.sys", "line_number": 592, "usage_type": "attribute"}, {"api_name": "omero.rtypes.wrap", "line_number": 605, "usage_type": "call"}, {"api_name": "omero.rtypes.unwrap", "line_number": 623, "usage_type": "call"}, {"api_name": "omero.model", "line_number": 635, "usage_type": "attribute"}, {"api_name": "omero.model.FileAnnotationI", "line_number": 636, "usage_type": "call"}, {"api_name": "omero.model", "line_number": 636, "usage_type": "attribute"}, {"api_name": "omero.rtypes.wrap", "line_number": 637, "usage_type": "call"}, {"api_name": "omero.sys.ParametersI", "line_number": 647, "usage_type": "call"}, {"api_name": "omero.sys", "line_number": 647, "usage_type": "attribute"}, {"api_name": "omero.rtypes.unwrap", "line_number": 664, "usage_type": "call"}, {"api_name": "omero.sys.ParametersI", "line_number": 665, "usage_type": "call"}, {"api_name": "omero.sys", "line_number": 665, "usage_type": "attribute"}, {"api_name": "omero.rtypes.unwrap", "line_number": 682, "usage_type": "call"}, {"api_name": "omero.model", "line_number": 691, "usage_type": "attribute"}, {"api_name": "AbstractAPI.AbstractFeatureStoreManager", "line_number": 744, "usage_type": "name"}]} +{"seq_id": "153083563", "text": "from django.shortcuts import render\nimport csv\nfrom myOpt.models import OptimizationData,FinalData\nfrom datetime import datetime,timedelta\nimport datetime\nfrom django.http import HttpResponse\nimport xlsxwriter\nfrom numpy.lib.function_base import average\nimport math\nfrom pandas import Series\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom matplotlib import pyplot\nimport numpy as np\nfrom pandas.core.frame import DataFrame\nfrom sklearn.metrics import mean_squared_error\nfrom statsmodels.tsa.ar_model import AR\nfrom statsmodels.tsa.arima_model import ARIMA\n\n#Create your views here.\ndef readAndWriteData(request):\n OptimizationData.objects.all().delete()\n counter = 0\n csv_file = open('example.csv')\n csv_reader = csv.reader(csv_file, delimiter = ',')\n next(csv_reader)\n objList = []\n for row in csv_reader:\n s = row[0]\n d =datetime.datetime.strptime(s, '%d-%m-%Y').strftime('%Y-%m-%d')\n \n newRow = OptimizationData(date = d, store = row[1],location = row[2],code = row[3],salerProductName = row[4],mainGroup = row[5],subGroup = row[6],productVariety = row[7],salesAmount = row[8])\n 
objList.append(newRow)\n counter = counter + 1\n if counter % 20 == 0:\n OptimizationData.objects.bulk_create(objList)\n objList = []\n \n if objList:\n OptimizationData.objects.bulk_create(objList)\n \n csv_file.close()\n return HttpResponse('finito')\n \ndef readFromDB(request):\n oldopt = OptimizationData.objects.filter(productVariety = 'cesit-1').order_by('date')\n FinalData.objects.all().delete()\n count = 0\n objlist = []\n for obj in oldopt:\n if count == 0:\n lastDate = obj.date\n lastAmount = obj.salesAmount\n else:\n if lastDate == obj.date:\n obj.salesAmount = lastAmount + obj.salesAmount\n lastObj = obj\n else:\n newEntry = FinalData(date = lastObj.date,salesAmount = lastObj.salesAmount)\n objlist.append(newEntry)\n lastObj = obj\n lastDate = obj.date\n \n if len(objlist) == 20:\n FinalData.objects.bulk_create(objlist)\n objlist = []\n count+= 1 \n \n if objlist:\n FinalData.objects.bulk_create(objlist)\n \n opt = FinalData.objects.all()\n opt2 = FinalData.objects.all()\n fitCount = 0\n weeklyFit = []\n weekCount = 0\n totalCount = 0\n finalArray = []\n for obj in opt2:\n weeklyFit.append(obj)\n weekCount += 1\n totalCount += 1\n if weekCount == 7 or totalCount == 333:\n total = 0\n for objs in weeklyFit:\n total += objs.salesAmount\n if weekCount == 7:\n average = total/7\n else:\n average = total/5\n for objs in weeklyFit:\n distance = objs.salesAmount - average\n finalArray.append(objs.salesAmount - distance/2)\n weekCount = 0\n weeklyFit = []\n fitCount2 = 0\n weeklyFit2 = []\n weekCount2 = 0\n totalCount2 = 0\n finalArray2 = []\n threefit = []\n for obj in opt2:\n weeklyFit2.append(obj)\n weekCount2 += 1\n totalCount2 += 1\n if weekCount2 == 7 or totalCount2 == 333:\n total = 0\n for objs in weeklyFit2:\n total += objs.salesAmount\n \n if weekCount2 == 7:\n average = total/7\n else:\n average = total/5\n \n fitCount2 = 0\n \n for objs in weeklyFit2:\n fitCount2+=1\n average2 = average\n threefit.append(objs)\n totalthree = 0\n if fitCount2 == 3:\n for objss in threefit:\n totalthree += objss.salesAmount\n average2 = totalthree/3\n threefit = []\n fitCount2 = 0\n finalArray2.append(average2)\n weekCount2 = 0\n weeklyFit2 = []\n \n baseArray = []\n predictionArray = []\n totalAverage = 0\n total = 0\n lastMonthAverage = 0\n totalLastMonth = 0\n realArray = []\n monthCount = 0\n monthTurn = 0\n patternArray = []\n febTurn = 0\n totalBaseCount = 0\n for i in range(31):\n patternArray.append(0)\n \n for obj in opt2:\n if totalBaseCount < 258:\n total += obj.salesAmount\n if totalBaseCount < 258:\n if totalBaseCount == 0:\n patternArray[monthCount] = obj.salesAmount\n else:\n patternArray[monthCount] += obj.salesAmount\n if obj.date > datetime.date(2016,12,31) and obj.date < datetime.date(2017,2,1):\n baseArray.append(obj)\n totalLastMonth += obj.salesAmount\n monthTurn = 0\n if obj.date > datetime.date(2017,1,31) and obj.date < datetime.date(2017,3,1):\n realArray.append(obj)\n febTurn = 1 \n monthCount+= 1\n if febTurn == 1 and monthCount == 28:\n monthCount = 0\n monthTurn = 0\n febTurn = 0\n if monthTurn == 0 and monthCount == 31:\n monthCount = 0\n monthTurn = 1\n if monthTurn == 1 and monthCount == 30:\n monthCount = 0\n monthTurn = 0\n totalBaseCount+=1\n for i in range(28):\n patternArray[i] = patternArray[i]/258\n lastMonthAverage = totalLastMonth/31\n totalAverage = total/258\n predictCount = 0\n predictAverage = totalAverage + (lastMonthAverage - totalAverage)/3*4\n predictTotal = predictAverage * 28\n remaining = 0\n while predictCount < 28:\n if predictCount == 0:\n 
temp = (baseArray[predictCount].salesAmount/5*3 + patternArray[predictCount]/5*2) + lastMonthAverage - totalAverage\n remaining = ((baseArray[predictCount].salesAmount/5*3 + patternArray[predictCount]/5*2) - predictAverage)/4\n \n else:\n if remaining != 0:\n temp = (baseArray[predictCount].salesAmount/5*3 + patternArray[predictCount]/5*2) - remaining + lastMonthAverage - totalAverage\n remaining = ((baseArray[predictCount].salesAmount/5*3 + patternArray[predictCount]/5*2) - predictAverage)/4\n else:\n temp = (baseArray[predictCount].salesAmount/5*3 + patternArray[predictCount]/5*2) + lastMonthAverage - totalAverage\n \n temp = math.ceil(temp)\n predictionArray.append(temp)\n predictCount+=1 \n workbook = xlsxwriter.Workbook('Prediction.xlsx')\n worksheet = workbook.add_worksheet('Sheet 1')\n bold = workbook.add_format({'bold': True})\n worksheet.write('A1', 'Date', bold)\n worksheet.write('B1', 'Amount', bold)\n worksheet.write('C1', 'BasicFit', bold)\n worksheet.write('D1', 'RoundFit', bold)\n worksheet.write('E1', 'RealFeb', bold)\n worksheet.write('F1', 'PredictedFeb', bold)\n worksheet.write('H1','Date',bold)\n worksheet.write('I1','Predicted Amount',bold)\n worksheet.write('J1','Real Amount',bold)\n\n row = 1\n col = 0\n row2 = 1\n col2 = 7\n row3 =1\n array = []\n date = []\n sayac = 0\n \n\n totalCount = 0\n for obj in opt:\n worksheet.write(row, col,str(obj.date))\n worksheet.write(row, col + 1, obj.salesAmount)\n worksheet.write(row, col + 2, finalArray[totalCount])\n worksheet.write(row, col + 3, finalArray2[totalCount])\n if totalCount<28:\n worksheet.write(row, col + 4, realArray[totalCount].salesAmount)\n worksheet.write(row, col + 5, predictionArray[totalCount])\n if sayac>=259 and sayac <= 286:\n worksheet.write(row3,col2,str(obj.date))\n row3+=1\n array.append(obj.salesAmount)\n date.append(str(obj.date))\n row += 1\n sayac+=1\n if totalCount!=333:\n totalCount+=1\n# Write a total using a formula.\n X = array\n size =int( len(X) * 0.916)\n train, test, tasi= X[0:259], X[259:287],X[287:len(X)]\n train.extend(tasi)\n print ('TEST')\n print(test)\n print(train)\n history = [x for x in train]\n a =np.array(history, dtype=np.float64)\n p = a.tolist()\n myarr = []\n myarr = p\n sayac2 = 0\n predictions = list()\n for t in range(len(test)):\n sayac2+=1 \n model = ARIMA(myarr[0:len(train)-len(test)+t], order=(5,1,0))\n model_fit = model.fit(disp=False)\n output = model_fit.forecast()\n yhat = output[0]\n predictions.append(yhat)\n obs = test[t]\n history.append(obs)\n worksheet.write(row2,col2+1,yhat)\n worksheet.write(row2,col2+2,obs)\n row2+=1\n error = mean_squared_error(test, predictions)\n print('Test MSE: %.3f' % error)\n\n pyplot.plot(test)\n pyplot.plot(predictions, color='red')\n pyplot.show()\n\n\n\n chart = workbook.add_chart({'type': 'line'})\n\n\n chart.add_series({\n 'values': ['Sheet 1', 0, 1,row-1 ,1],\n 'categories' : ['Sheet 1', 1, 0, row-1, 0],\n 'line' : {'color': 'blue'},\n 'name' : 'Real Amount',\n })\n chart.add_series({\n 'values': ['Sheet 1', 0, 2,row-1 ,2],\n 'categories' : ['Sheet 1', 1, 0, row-1, 0],\n 'line' : {'color': 'orange'},\n 'name' : 'BasicFit',\n })\n chart.set_title ({'name': 'Optimization Results'})\n chart.set_x_axis({'name': 'Sales Amount'})\n chart.set_y_axis({'name': 'Date'})\n \n chart2 = workbook.add_chart({'type': 'line'})\n\n\n# Configure the chart. 
In simplest case we add one or more data series.\n chart2.add_series({\n 'values': ['Sheet 1', 0, 1,row-1 ,1],\n 'categories' : ['Sheet 1', 1, 0, row-1, 0],\n 'line' : {'color': 'blue'},\n 'name' : 'Real Amount',\n })\n chart2.add_series({\n 'values': ['Sheet 1', 0, 3,row-1 ,3],\n 'categories' : ['Sheet 1', 1, 0, row-1, 0],\n 'line' : {'color': 'orange'},\n 'name' : 'roundFit',\n })\n chart2.set_title ({'name': 'Optimization Results2'})\n chart2.set_x_axis({'name': 'Sales Amount'})\n chart2.set_y_axis({'name': 'Date'})\n \n chart3 = workbook.add_chart({'type': 'line'})\n\n\n# Configure the chart. In simplest case we add one or more data series.\n chart3.add_series({\n 'values': ['Sheet 1', 0, 4,28 ,4],\n 'categories' : ['Sheet 1', 260, 0, 287, 0],\n 'line' : {'color': 'blue'},\n 'name' : 'Real Amount',\n })\n chart3.add_series({\n 'values': ['Sheet 1', 0, 5,28 ,5],\n 'categories' : ['Sheet 1', 260, 0, 287, 0],\n 'line' : {'color': 'orange'},\n 'name' : 'roundFit',\n })\n \n chart4 = workbook.add_chart({'type': 'line'})\n\n chart4.add_series({ #real\n 'values': ['Sheet 1', 1, 9,row2-1 ,9],\n 'categories' : ['Sheet 1', 1, 7, row3-1, 7],\n 'line' : {'color': 'blue'},\n 'name' : 'Real Amount',\n })\n \n chart4.add_series({ #predicted\n 'values': ['Sheet 1', 1, 8,row2-1 ,8],\n 'categories' : ['Sheet 1', 1, 7, row3-1, 7],\n 'line' : {'color': 'red'},\n 'name' : 'Real Amount',\n }) \n \n chart.set_title ({'name': 'Weekly Fit'})\n chart.set_x_axis({'name': 'Sales Amount'})\n chart.set_y_axis({'name': 'Date'}) \n \n chart2.set_title ({'name': '3 Days Fit'})\n chart2.set_x_axis({'name': 'Sales Amount'})\n chart2.set_y_axis({'name': 'Date'}) \n \n chart3.set_title ({'name': 'Optimization Results with our algorithm (For February)'})\n chart3.set_x_axis({'name': 'Sales Amount'})\n chart3.set_y_axis({'name': 'Date'})\n \n chart4.set_title ({'name': 'Regression Result with ARIMA (For February)'})\n chart4.set_x_axis({'name': 'Sales Amount'})\n chart4.set_y_axis({'name': 'Date'})\n \n worksheet.insert_chart('L1', chart)\n worksheet.insert_chart('L16', chart2)\n worksheet.insert_chart('L30', chart3)\n worksheet.insert_chart('L45', chart4)\n\n workbook.close()\n return HttpResponse('readFinito')\n \n", "sub_path": "Opt/myOpt/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "myOpt.models.OptimizationData.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "myOpt.models.OptimizationData", "line_number": 22, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "myOpt.models.OptimizationData", "line_number": 32, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects.bulk_create", "line_number": 36, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "myOpt.models.OptimizationData", "line_number": 36, "usage_type": "name"}, {"api_name": "myOpt.models.OptimizationData.objects.bulk_create", "line_number": 40, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects", "line_number": 40, "usage_type": 
"attribute"}, {"api_name": "myOpt.models.OptimizationData", "line_number": 40, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "myOpt.models.OptimizationData.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "myOpt.models.OptimizationData", "line_number": 46, "usage_type": "name"}, {"api_name": "myOpt.models.FinalData.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "myOpt.models.FinalData", "line_number": 47, "usage_type": "name"}, {"api_name": "myOpt.models.FinalData", "line_number": 59, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects.bulk_create", "line_number": 65, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "myOpt.models.FinalData", "line_number": 65, "usage_type": "name"}, {"api_name": "myOpt.models.FinalData.objects.bulk_create", "line_number": 70, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "myOpt.models.FinalData", "line_number": 70, "usage_type": "name"}, {"api_name": "myOpt.models.FinalData.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "myOpt.models.FinalData", "line_number": 72, "usage_type": "name"}, {"api_name": "myOpt.models.FinalData.objects.all", "line_number": 73, "usage_type": "call"}, {"api_name": "myOpt.models.FinalData.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "myOpt.models.FinalData", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 114, "usage_type": "name"}, {"api_name": "numpy.lib.function_base.average", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 160, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 195, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 248, "usage_type": "attribute"}, {"api_name": "statsmodels.tsa.arima_model.ARIMA", "line_number": 256, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": 
"name"}, {"api_name": "django.http.HttpResponse", "line_number": 369, "usage_type": "call"}]} +{"seq_id": "266570493", "text": "import bpy\r\nimport os\r\n\r\n\r\n#-----Creater Folder----\r\ndef createrFolder(path):\r\n folder = os.path.exists(path)\r\n if folder==False:\r\n os.makedirs(path)\r\n return path\r\n\r\n#-----Creater Folder---- \r\n\r\nclass bgmexportfbx(bpy.types.Operator):\r\n bl_idname = \"my_operator.bgmexportfbx\"\r\n bl_label = \"Bgmexportfbx\"\r\n bl_description = \"\"\r\n bl_options = {\"REGISTER\"}\r\n\r\n \r\n @classmethod\r\n def poll(cls, context):\r\n return True\r\n\r\n def execute(self, context): \r\n parName=bpy.context.scene.fbxcommonname\r\n filepath = bpy.data.filepath\r\n directory = os.path.dirname(filepath)\r\n bl2sppath=createrFolder(directory+\"\\\\bgm2FBX\\\\\")\r\n objlist=bpy.context.selected_objects\r\n for objs in objlist:\r\n bpy.data.objects[objs.name].select=False\r\n \r\n for objss in objlist:\r\n if directory==\"\":\r\n print(\"Please save you file\")\r\n if (directory!=\"\"): \r\n bpy.context.scene.objects.active = objss\r\n # bpy.ops.object.duplicate(linked=False)\r\n \r\n pos=objss.location\r\n print(\"pos\")\r\n newpos=(pos[0],pos[1],pos[2])\r\n\r\n obj=bpy.context.scene.objects.active\r\n bpy.data.objects[obj.name].select=True\r\n print(obj.name)\r\n o = obj\r\n vcos = [ o.matrix_world * v.co for v in o.data.vertices ]\r\n findCenter = lambda l: ( max(l) + min(l) ) / 2\r\n x,y,z = [ [ v[i] for v in vcos ] for i in range(3) ]\r\n center = [ findCenter(axis) for axis in [x,y,z] ]\r\n #pos=obj.location\r\n \r\n \r\n #--------FIND CENTER---------\r\n obj.location=(obj.location[0]-center[0],\r\n (obj.location[1]-center[1]),\r\n (obj.location[2]-center[2])+obj.dimensions.z/2)\r\n\r\n bpy.ops.apply.transformall()\r\n #--------FIND CENTER---------\r\n newpath=bl2sppath+parName+objss.name+\".fbx\" \r\n bpy.ops.export_scene.fbx(filepath=newpath, check_existing=False, \r\n axis_forward='-Z', axis_up='Y', filter_glob=\"*.fbx\", version='BIN7400', \r\n ui_tab='MAIN', use_selection=True, global_scale=1.0, apply_unit_scale=True, \r\n bake_space_transform=False, object_types={'ARMATURE', 'CAMERA', 'EMPTY', 'LAMP', 'MESH', 'OTHER'}, \r\n use_mesh_modifiers=True, mesh_smooth_type='OFF', use_mesh_edges=False, use_tspace=False, \r\n use_custom_props=False, add_leaf_bones=True, primary_bone_axis='Y', secondary_bone_axis='X', \r\n use_armature_deform_only=False, armature_nodetype='NULL', bake_anim=True, bake_anim_use_all_bones=True, \r\n bake_anim_use_nla_strips=True, bake_anim_use_all_actions=True, bake_anim_force_startend_keying=True, \r\n bake_anim_step=1.0, bake_anim_simplify_factor=1.0, use_anim=True, use_anim_action_all=True, \r\n use_default_take=True, use_anim_optimize=True, anim_optimize_precision=6.0, path_mode='AUTO', \r\n embed_textures=False, batch_mode='OFF', use_batch_own_dir=True, use_metadata=True) \r\n #--------Set POS---------\r\n bpy.data.objects[obj.name].location=newpos\r\n bpy.data.objects[obj.name].select=False\r\n os.system(\"start explorer \"+bl2sppath) \r\n return {\"FINISHED\"}\r\n ", "sub_path": "All_In_One/addons/BL2SP/operators/bgm_export_fbx.py", "file_name": "bgm_export_fbx.py", "file_ext": "py", "file_size_in_byte": 3532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.exists", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 9, "usage_type": 
"call"}, {"api_name": "bpy.types", "line_number": 14, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 26, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 30, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 38, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 45, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 46, "usage_type": "attribute"}, {"api_name": "bpy.ops.apply.transformall", "line_number": 61, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 61, "usage_type": "attribute"}, {"api_name": "bpy.ops.export_scene.fbx", "line_number": 64, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 64, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 76, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "269897437", "text": "import json\nimport operator\nimport os\nimport re\nfrom dataclasses import asdict, dataclass\nfrom datetime import date\nfrom glob import glob\nfrom typing import Any, Dict, List, Optional, Sequence\n\nimport yaml\nfrom slugify import slugify\n\nfrom movielog import db, has_sequence, humanize\nfrom movielog.logger import logger\n\nSEQUENCE = \"sequence\"\nFM_REGEX = re.compile(r\"^-{3,}\\s*$\", re.MULTILINE)\nREVIEWS = \"reviews\"\nTABLE_NAME = REVIEWS\nFOLDER_PATH = REVIEWS\nIMDB_ID = \"imdb_id\"\nTITLE = \"title\"\nEMPTY_STRING = \"\"\n\n\ndef represent_none(self: Any, _: Any) -> Any:\n return self.represent_scalar(\"tag:yaml.org,2002:null\", EMPTY_STRING)\n\n\nyaml.add_representer(type(None), represent_none) # type: ignore\n\n\n@dataclass\nclass Review(object):\n sequence: Optional[int]\n imdb_id: str\n title: str\n date: date\n grade: str\n venue: str\n file_path: Optional[str]\n grade_value: Optional[int] = None\n slug: Optional[str] = None\n venue_notes: Optional[str] = None\n review_content: Optional[str] = None\n\n @classmethod\n def from_yaml_object(cls, file_path: str, yaml_object: Dict[str, Any]) -> \"Review\":\n grade = yaml_object[\"grade\"]\n\n return Review(\n file_path=file_path,\n date=yaml_object[\"date\"],\n grade=grade,\n grade_value=cls.grade_value_for_grade(grade),\n title=yaml_object[\"title\"],\n imdb_id=yaml_object[IMDB_ID],\n sequence=yaml_object[\"sequence\"],\n slug=yaml_object[\"slug\"],\n venue=yaml_object[\"venue\"],\n venue_notes=yaml_object[\"venue_notes\"],\n )\n\n @classmethod\n def grade_value_for_grade(cls, grade: str) -> Optional[int]:\n if not grade:\n return None\n\n grade_map = {\n \"A\": 12,\n \"B\": 9,\n \"C\": 6,\n \"D\": 3,\n \"F\": 1,\n }\n\n grade_value = grade_map.get(grade[0], 3)\n modifier = grade[-1]\n\n if modifier == \"+\":\n grade_value = grade_value + 1\n\n if modifier == \"-\":\n grade_value = grade_value - 1\n\n return grade_value\n\n @classmethod\n def load_all(cls) -> Sequence[\"Review\"]:\n reviews: List[Review] = []\n for review_file_path in glob(os.path.join(FOLDER_PATH, \"*.md\")):\n reviews.append(Review.from_file_path(review_file_path))\n\n reviews.sort(key=operator.attrgetter(SEQUENCE))\n\n logger.log(\"Read {} {}.\", 
humanize.intcomma(len(reviews)), \"reviews\")\n return reviews\n\n @classmethod\n def from_file_path(cls, file_path: str) -> \"Review\":\n with open(file_path, \"r\") as review_file:\n _, fm, review_content = FM_REGEX.split(review_file.read(), 2)\n\n review = cls.from_yaml_object(file_path, yaml.safe_load(fm))\n review.file_path = file_path\n review.review_content = review_content\n\n return review\n\n def ensure_file_path(self) -> str:\n if not self.sequence:\n self.sequence = has_sequence.next_sequence(type(self).load_all())\n\n file_path = self.file_path\n\n if not file_path:\n file_name = slugify(\n \"{0:04d} {1}\".format(self.sequence, self.title),\n replacements=[(\"'\", EMPTY_STRING)],\n )\n file_path = os.path.join(FOLDER_PATH, \"{0}.md\".format(file_name))\n\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n return file_path\n\n def as_yaml(self) -> Dict[str, Any]:\n return {\n SEQUENCE: self.sequence,\n \"date\": self.date,\n IMDB_ID: self.imdb_id,\n TITLE: self.title,\n \"grade\": self.grade,\n \"slug\": slugify(self.title, replacements=[(\"'\", EMPTY_STRING)]),\n \"venue\": self.venue,\n \"venue_notes\": self.venue_notes,\n }\n\n def save(self) -> str:\n file_path = self.ensure_file_path()\n\n stripped_content = str(self.review_content or \"\").strip()\n\n with open(file_path, \"w\") as output_file:\n output_file.write(\"---\\n\")\n yaml.dump(\n self.as_yaml(),\n encoding=\"utf-8\",\n allow_unicode=True,\n default_flow_style=False,\n sort_keys=False,\n stream=output_file,\n )\n output_file.write(\"---\\n\\n\")\n output_file.write(stripped_content)\n\n self.file_path = file_path\n\n logger.log(\"Wrote {}\", self.file_path)\n\n return file_path\n\n\nclass ReviewsTable(db.Table):\n table_name = TABLE_NAME\n\n recreate_ddl = \"\"\"\n DROP TABLE IF EXISTS \"{0}\";\n CREATE TABLE \"{0}\" (\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n \"movie_imdb_id\" TEXT NOT NULL REFERENCES movies(imdb_id) DEFERRABLE INITIALLY DEFERRED,\n \"date\" DATE NOT NULL,\n \"sequence\" INT NOT NULL,\n \"grade\" TEXT NOT NULL,\n \"grade_value\" INT NOT NULL,\n \"slug\" TEXT NOT NULL,\n \"venue\" TEXT NOT NULL);\n DROP TRIGGER IF EXISTS multiple_slugs;\n CREATE TRIGGER multiple_slugs\n BEFORE INSERT ON \"{0}\"\n BEGIN\n SELECT RAISE(FAIL, \"conflicting slugs\")\n FROM \"{0}\"\n WHERE movie_imdb_id = NEW.movie_imdb_id\n AND slug != NEW.slug;\n END;\n \"\"\"\n\n @classmethod\n def insert_reviews(cls, reviews: Sequence[Review]) -> None:\n ddl = \"\"\"\n INSERT INTO {0}(movie_imdb_id, date, sequence, grade, grade_value, slug, venue)\n VALUES(:imdb_id, :date, :sequence, :grade, :grade_value, :slug, :venue);\n \"\"\"\n\n parameter_seq = [asdict(review) for review in reviews]\n\n cls.insert(ddl=ddl.format(cls.table_name), parameter_seq=parameter_seq)\n cls.add_index(SEQUENCE)\n cls.add_index(\"movie_imdb_id\")\n cls.validate(reviews)\n\n\ndef update() -> None:\n logger.log(\"==== Begin updating {}...\", TABLE_NAME)\n\n reviews = Review.load_all()\n\n ReviewsTable.recreate()\n ReviewsTable.insert_reviews(reviews)\n\n\ndef add(\n imdb_id: str,\n title: str,\n review_date: date,\n year: int,\n grade: str,\n venue: str,\n venue_notes: Optional[str] = None,\n) -> Review:\n review = Review(\n imdb_id=imdb_id,\n title=\"{0} ({1})\".format(title, year),\n date=review_date,\n grade=grade,\n venue=venue,\n venue_notes=venue_notes,\n sequence=0,\n file_path=None,\n )\n\n review.save()\n\n return review\n\n\ndef existing_review(imdb_id: str) -> Optional[Review]:\n 
reviews = sorted(\n Review.load_all(), key=lambda review: review.sequence or 0, reverse=True\n )\n\n return next((review for review in reviews if review.imdb_id is imdb_id), None)\n\n\ndef export() -> None:\n Exporter.export()\n\n\nclass Exporter(object):\n @classmethod\n def fetch_reviews(cls) -> List[Dict[str, Any]]:\n reviews = []\n\n query = \"\"\"\n SELECT\n DISTINCT(reviews.movie_imdb_id) AS imdb_id\n , title\n , original_title\n , year\n , reviews.date\n , reviews.sequence\n , release_date\n , grade as last_review_grade\n , grade_value as last_review_grade_value\n , slug\n , sort_title\n , principal_cast_ids\n , runtime_minutes\n FROM reviews\n INNER JOIN movies ON reviews.movie_imdb_id = movies.imdb_id\n INNER JOIN release_dates ON reviews.movie_imdb_id = release_dates.movie_imdb_id\n INNER JOIN sort_titles ON reviews.movie_imdb_id = sort_titles.movie_imdb_id\n ORDER BY sort_title ASC;\n \"\"\"\n\n rows = db.exec_query(query)\n\n for row in rows:\n reviews.append(dict(row))\n\n return reviews\n\n @classmethod\n def fetch_directors_for_title_id(cls, title_imdb_id: str) -> List[Dict[str, Any]]:\n query = \"\"\"\n SELECT\n full_name\n FROM people\n INNER JOIN directing_credits ON person_imdb_id = imdb_id\n WHERE movie_imdb_id = \"{0}\";\n \"\"\"\n\n rows = db.exec_query(query.format(title_imdb_id))\n\n directors = []\n\n for row in rows:\n directors.append(dict(row))\n\n return directors\n\n @classmethod\n def fetch_countries_for_title_id(cls, title_imdb_id: str) -> List[str]:\n query = \"\"\"\n SELECT\n country\n FROM countries\n WHERE movie_imdb_id = \"{0}\";\n \"\"\"\n\n rows = db.exec_query(query.format(title_imdb_id))\n\n return [row[\"country\"] for row in rows]\n\n @classmethod\n def fetch_aka_titles_for_title_id(\n cls, title_imdb_id: str, title: str, original_title: str\n ) -> List[Dict[str, Any]]:\n query = \"\"\"\n SELECT\n title\n FROM aka_titles\n WHERE region = \"US\"\n AND movie_imdb_id = \"{0}\"\n AND title != \"{1}\"\n AND (attributes IS NULL\n OR (attributes NOT LIKE \"%working title%\"\n AND attributes NOT LIKE \"%alternative spelling%\"));\n \"\"\" # noqa: WPS323\n\n rows = db.exec_query(query.format(title_imdb_id, title))\n\n aka_titles = []\n\n for row in rows:\n aka_titles.append(row[TITLE])\n\n if original_title != title:\n if original_title not in aka_titles:\n aka_titles.append(original_title)\n\n return aka_titles\n\n @classmethod\n def fetch_principal_cast(\n cls, principal_cast_ids_with_commas: str\n ) -> List[Dict[str, Any]]:\n query = \"\"\"\n SELECT\n full_name\n FROM people\n WHERE imdb_id = \"{0}\";\n \"\"\"\n\n principal_cast = []\n\n for principal_cast_id in principal_cast_ids_with_commas.split(\",\"):\n rows = db.exec_query(query.format(principal_cast_id))\n\n for row in rows:\n principal_cast.append(dict(row))\n\n return principal_cast\n\n @classmethod\n def export(cls) -> None:\n logger.log(\"==== Begin exporting {}...\", \"reviewed movies\")\n\n reviews = Review.load_all()\n ReviewsTable.recreate()\n ReviewsTable.insert_reviews(reviews)\n\n review_rows = cls.fetch_reviews()\n\n for review_row in review_rows:\n review_row[\"directors\"] = cls.fetch_directors_for_title_id(\n title_imdb_id=review_row[IMDB_ID]\n )\n\n review_row[\"aka_titles\"] = cls.fetch_aka_titles_for_title_id(\n title_imdb_id=review_row[IMDB_ID],\n title=review_row[TITLE],\n original_title=review_row[\"original_title\"],\n )\n\n review_row[\"principal_cast\"] = cls.fetch_principal_cast(\n principal_cast_ids_with_commas=review_row[\"principal_cast_ids\"]\n )\n 
review_row[\"countries\"] = cls.fetch_countries_for_title_id(\n title_imdb_id=review_row[IMDB_ID]\n )\n\n file_path = os.path.join(\"export\", \"reviewed_movies.json\")\n\n with open(file_path, \"w\") as output_file:\n output_file.write(json.dumps([dict(row) for row in review_rows]))\n", "sub_path": "movielog/reviews.py", "file_name": "reviews.py", "file_ext": "py", "file_size_in_byte": 11302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 26, "usage_type": "name"}, {"api_name": "yaml.add_representer", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 90, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "operator.attrgetter", "line_number": 94, "usage_type": "call"}, {"api_name": "movielog.logger.logger.log", "line_number": 96, "usage_type": "call"}, {"api_name": "movielog.logger.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "movielog.humanize.intcomma", "line_number": 96, "usage_type": "call"}, {"api_name": "movielog.humanize", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 89, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 104, "usage_type": "call"}, {"api_name": "movielog.has_sequence.next_sequence", "line_number": 112, "usage_type": "call"}, {"api_name": "movielog.has_sequence", "line_number": 112, "usage_type": "name"}, {"api_name": "slugify.slugify", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 123, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "slugify.slugify", "line_number": 135, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 128, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 147, "usage_type": "call"}, {"api_name": "movielog.logger.logger.log", "line_number": 160, 
"usage_type": "call"}, {"api_name": "movielog.logger.logger", "line_number": 160, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 33, "usage_type": "name"}, {"api_name": "movielog.db.Table", "line_number": 165, "usage_type": "attribute"}, {"api_name": "movielog.db", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 191, "usage_type": "name"}, {"api_name": "dataclasses.asdict", "line_number": 197, "usage_type": "call"}, {"api_name": "movielog.logger.logger.log", "line_number": 206, "usage_type": "call"}, {"api_name": "movielog.logger.logger", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 239, "usage_type": "name"}, {"api_name": "movielog.db.exec_query", "line_number": 278, "usage_type": "call"}, {"api_name": "movielog.db", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 253, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 253, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 253, "usage_type": "name"}, {"api_name": "movielog.db.exec_query", "line_number": 295, "usage_type": "call"}, {"api_name": "movielog.db", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 286, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 286, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 286, "usage_type": "name"}, {"api_name": "movielog.db.exec_query", "line_number": 313, "usage_type": "call"}, {"api_name": "movielog.db", "line_number": 313, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 305, "usage_type": "name"}, {"api_name": "movielog.db.exec_query", "line_number": 333, "usage_type": "call"}, {"api_name": "movielog.db", "line_number": 333, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 320, "usage_type": "name"}, {"api_name": "movielog.db.exec_query", "line_number": 360, "usage_type": "call"}, {"api_name": "movielog.db", "line_number": 360, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 349, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 349, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 349, "usage_type": "name"}, {"api_name": "movielog.logger.logger.log", "line_number": 369, "usage_type": "call"}, {"api_name": "movielog.logger.logger", "line_number": 369, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path", "line_number": 395, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 398, "usage_type": "call"}]} +{"seq_id": "155863438", "text": "\"\"\"\nCommand line interface programs for the GA4GH reference implementation.\n\nTODO: document how to use these for development and simple deployment.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time\nimport argparse\n\nimport ga4gh.frontend as frontend\nimport ga4gh.client as client\nimport ga4gh.backend as backend\nimport ga4gh.protocol as protocol\nimport ga4gh.datamodel.variants as 
variants\n\n##############################################################################\n# Server\n##############################################################################\n\n\ndef server_main(parser=None):\n if parser is None:\n parser = argparse.ArgumentParser(\n description=\"GA4GH reference server\")\n # Add global options\n parser.add_argument(\n \"--port\", \"-P\", default=8000, type=int,\n help=\"The port to listen on\")\n parser.add_argument(\n \"--config\", \"-C\", default='DefaultConfig', type=str,\n help=\"The configuration to use\")\n parser.add_argument(\n \"--config-file\", \"-F\", type=str,\n help=\"The configuration file to use\")\n\n subparsers = parser.add_subparsers(title='subcommands',)\n\n # help\n subparsers.add_parser(\n \"help\",\n description=\"ga4gh_server help\",\n help=\"show this help message and exit\")\n # Wormtable backend\n wtbParser = subparsers.add_parser(\n \"wormtable\",\n description=\"Serve the API using a wormtable based backend.\",\n help=\"Serve data from tables.\")\n wtbParser.add_argument(\n \"dataDir\",\n help=\"The directory containing the wormtables to be served.\")\n wtbParser.set_defaults(variantSetClass=variants.WormtableVariantSet)\n # Tabix\n tabixParser = subparsers.add_parser(\n \"tabix\",\n description=\"Serve the API using a tabix based backend.\",\n help=\"Serve data from Tabix indexed VCFs\")\n tabixParser.add_argument(\n \"dataDir\",\n help=\"The directory containing VCFs\")\n tabixParser.set_defaults(variantSetClass=variants.TabixVariantSet)\n\n args = parser.parse_args()\n if \"variantSetClass\" not in args:\n parser.print_help()\n else:\n frontend.configure(args.config, args.config_file)\n frontend.app.backend = backend.Backend(\n args.dataDir, args.variantSetClass)\n frontend.app.run(host=\"0.0.0.0\", port=args.port, debug=True)\n\n##############################################################################\n# Client\n##############################################################################\n\n\nclass AbstractQueryRunner(object):\n \"\"\"\n Abstract base class for runner classes\n \"\"\"\n def __init__(self, args):\n self._workarounds = set(args.workarounds.split(','))\n self._key = args.key\n self._verbosity = args.verbose\n self._httpClient = client.HttpClient(\n args.baseUrl, args.verbose, self._workarounds, self._key)\n\n def usingWorkaroundsFor(self, workaround):\n \"\"\"\n Returns true if we are using the passed-in workaround\n \"\"\"\n return workaround in self._workarounds\n\n\nclass AbstractGetRunner(AbstractQueryRunner):\n \"\"\"\n Abstract base class for get runner classes\n \"\"\"\n def __init__(self, args):\n super(AbstractGetRunner, self).__init__(args)\n self._id = args.id\n self._httpClient = client.HttpClient(\n args.baseUrl, args.verbose, self._workarounds, self._key)\n\n def _run(self, method):\n response = method(self._id)\n print(response.id)\n\n\nclass AbstractSearchRunner(AbstractQueryRunner):\n \"\"\"\n Abstract base class for search runner classes\n \"\"\"\n def __init__(self, args):\n super(AbstractSearchRunner, self).__init__(args)\n\n def _setRequest(self, request, args):\n \"\"\"\n Sets the _httpClient and other common attributes\n \"\"\"\n self._minimalOutput = args.minimalOutput\n if 'pageSize' in args:\n # GAListReferenceBasesRequest does not have a pageSize attr\n request.pageSize = args.pageSize\n self._request = request\n\n def _run(self, method, attrName=None):\n \"\"\"\n Runs the request given methodname and prints out\n the each result's attrName attribute if it is 
provided.\n If not, prints each entire result object.\n \"\"\"\n results = method(self._request)\n for result in results:\n if attrName is None:\n print(result)\n else:\n attr = getattr(result, attrName)\n print(attr)\n\n\nclass SearchVariantSetsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the variantsets/search method.\n \"\"\"\n def __init__(self, args):\n super(SearchVariantSetsRunner, self).__init__(args)\n request = protocol.GASearchVariantSetsRequest()\n setCommaSeparatedAttribute(request, args, 'datasetIds')\n self._setRequest(request, args)\n\n def run(self):\n self._run(self._httpClient.searchVariantSets, 'datasetId')\n\n\nclass SearchVariantsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the variants/search method.\n \"\"\"\n def __init__(self, args):\n super(SearchVariantsRunner, self).__init__(args)\n request = protocol.GASearchVariantsRequest()\n request.referenceName = args.referenceName\n request.variantName = args.variantName\n request.start = args.start\n request.end = args.end\n if self.usingWorkaroundsFor(client.HttpClient.workaroundGoogle):\n request.maxCalls = args.maxCalls\n if args.callSetIds == []:\n request.callSetIds = []\n elif args.callSetIds == '*':\n request.callSetIds = None\n else:\n request.callSetIds = args.callSetIds.split(\",\")\n setCommaSeparatedAttribute(request, args, 'variantSetIds')\n self._setRequest(request, args)\n\n def run(self):\n if self._minimalOutput:\n self._run(self._httpClient.searchVariants, 'id')\n else:\n results = self._httpClient.searchVariants(self._request)\n for result in results:\n self.printVariant(result)\n\n def printVariant(self, variant):\n \"\"\"\n Prints out the specified GAVariant object in a VCF-like form.\n \"\"\"\n print(\n variant.id, variant.variantSetId, variant.names,\n variant.referenceName, variant.start, variant.end,\n variant.referenceBases, variant.alternateBases,\n sep=\"\\t\", end=\"\\t\")\n for key, value in variant.info.items():\n print(key, value, sep=\"=\", end=\";\")\n print(\"\\t\", end=\"\")\n for c in variant.calls:\n print(\n c.callSetId, c.genotype, c.genotypeLikelihood, c.info,\n c.phaseset, sep=\":\", end=\"\\t\")\n print()\n\n\nclass SearchReferenceSetsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the referencesets/search method.\n \"\"\"\n def __init__(self, args):\n super(SearchReferenceSetsRunner, self).__init__(args)\n request = protocol.GASearchReferenceSetsRequest()\n setCommaSeparatedAttribute(request, args, 'accessions')\n setCommaSeparatedAttribute(request, args, 'md5checksums')\n self._setRequest(request, args)\n\n def run(self):\n self._run(self._httpClient.searchReferenceSets, 'id')\n\n\nclass SearchReferencesRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the references/search method\n \"\"\"\n def __init__(self, args):\n super(SearchReferencesRunner, self).__init__(args)\n request = protocol.GASearchReferencesRequest()\n setCommaSeparatedAttribute(request, args, 'accessions')\n setCommaSeparatedAttribute(request, args, 'md5checksums')\n self._setRequest(request, args)\n\n def run(self):\n self._run(self._httpClient.searchReferences, 'id')\n\n\nclass SearchReadGroupSetsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the readgroupsets/search method\n \"\"\"\n def __init__(self, args):\n super(SearchReadGroupSetsRunner, self).__init__(args)\n request = protocol.GASearchReadGroupSetsRequest()\n setCommaSeparatedAttribute(request, args, 'datasetIds')\n request.name = args.name\n self._setRequest(request, args)\n\n def 
run(self):\n self._run(self._httpClient.searchReadGroupSets, 'id')\n\n\nclass SearchCallSetsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the callsets/search method\n \"\"\"\n def __init__(self, args):\n super(SearchCallSetsRunner, self).__init__(args)\n request = protocol.GASearchCallSetsRequest()\n setCommaSeparatedAttribute(request, args, 'variantSetIds')\n request.name = args.name\n self._setRequest(request, args)\n\n def run(self):\n self._run(self._httpClient.searchCallSets, 'id')\n\n\nclass SearchReadsRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the reads/search method\n \"\"\"\n class GASearchReadsRequestGoogle(protocol.ProtocolElement):\n\n __slots__ = ['end', 'pageSize', 'pageToken', 'readGroupIds',\n 'referenceName', 'start']\n\n def __init__(self):\n self.end = None\n self.pageSize = None\n self.pageToken = None\n self.readGroupIds = []\n self.referenceName = None\n self.start = 0\n\n def __init__(self, args):\n super(SearchReadsRunner, self).__init__(args)\n request = protocol.GASearchReadsRequest()\n if self.usingWorkaroundsFor(client.HttpClient.workaroundGoogle):\n # google says referenceId not a valid field\n request = self.GASearchReadsRequestGoogle()\n setCommaSeparatedAttribute(request, args, 'readGroupIds')\n request.start = args.start\n request.end = args.end\n request.referenceId = args.referenceId\n request.referenceName = args.referenceName\n self._setRequest(request, args)\n\n def run(self):\n self._run(self._httpClient.searchReads, 'id')\n\n\nclass ListReferenceBasesRunner(AbstractSearchRunner):\n \"\"\"\n Runner class for the references/{id}/bases method\n \"\"\"\n def __init__(self, args):\n super(ListReferenceBasesRunner, self).__init__(args)\n request = protocol.GAListReferenceBasesRequest()\n request.start = args.start\n request.end = args.end\n self._id = args.id\n self._setRequest(request, args)\n\n def run(self):\n method = self._httpClient.listReferenceBases\n for base in method(self._request, self._id):\n print(base.sequence)\n\n\nclass GetReferenceSetRunner(AbstractGetRunner):\n \"\"\"\n Runner class for the referencesets/{id} method\n \"\"\"\n def __init__(self, args):\n super(GetReferenceSetRunner, self).__init__(args)\n\n def run(self):\n self._run(self._httpClient.getReferenceSet)\n\n\nclass GetReferenceRunner(AbstractGetRunner):\n \"\"\"\n Runner class for the references/{id} method\n \"\"\"\n def __init__(self, args):\n super(GetReferenceRunner, self).__init__(args)\n\n def run(self):\n self._run(self._httpClient.getReference)\n\n\nclass BenchmarkRunner(SearchVariantsRunner):\n \"\"\"\n Runner class for the client side benchmarking. 
This is intended to give\n rough figures on protocol throughput on the server side over various\n requests.\n \"\"\"\n def run(self):\n numVariants = 0\n beforeCpu = time.clock()\n beforeWall = time.time()\n try:\n for variant in self._httpClient.searchVariants(self._request):\n numVariants += 1\n except KeyboardInterrupt:\n pass\n cpuTime = time.clock() - beforeCpu\n wallTime = time.time() - beforeWall\n totalBytes = self._httpClient.getBytesRead()\n totalBytes /= 1024 * 1024\n s = \"read {0} variants in {1:.2f} seconds; CPU time {2:.2f}\".format(\n numVariants, wallTime, cpuTime)\n s += \"; {0:.2f} MB @ {1:.2f} MB/s; {2:.2f} vars/s\".format(\n totalBytes, totalBytes / wallTime, numVariants / wallTime)\n print(s)\n\n\ndef addVariantSearchOptions(parser):\n \"\"\"\n Adds common options to a variant searches command line parser.\n \"\"\"\n addVariantSetIdsArgument(parser)\n parser.add_argument(\n \"--referenceName\", \"-r\", default=\"chrSim\",\n help=\"Only return variants on this reference.\")\n parser.add_argument(\n \"--variantName\", \"-n\", default=None,\n help=\"Only return variants which have exactly this name.\")\n parser.add_argument(\n \"--callSetIds\", \"-c\", default=[],\n help=\"\"\"Return variant calls which belong to call sets\n with these IDs. Pass in IDs as a comma separated list (no spaces),\n or '*' (with the single quotes!) to indicate 'all call sets'.\n Omit this option to indicate 'no call sets'.\n \"\"\")\n addStartArgument(parser)\n addEndArgument(parser)\n addPageSizeArgument(parser)\n # maxCalls not in protocol; supported by google\n parser.add_argument(\n \"--maxCalls\", default=1,\n help=\"The maxiumum number of calls to return\")\n\n\ndef addVariantSetIdsArgument(parser):\n parser.add_argument(\n \"--variantSetIds\", \"-V\",\n help=\"The variant set id(s) to search over\")\n\n\ndef addStartArgument(parser):\n parser.add_argument(\n \"--start\", \"-s\", default=0, type=int,\n help=\"The start of the search range (inclusive).\")\n\n\ndef addEndArgument(parser):\n parser.add_argument(\n \"--end\", \"-e\", default=1, type=int,\n help=\"The end of the search range (exclusive).\")\n\n\ndef addIdArgument(parser):\n parser.add_argument(\"--id\", default=None, help=\"The id of the object\")\n\n\ndef addGetArguments(parser):\n addIdArgument(parser)\n addUrlArgument(parser)\n\n\ndef addUrlArgument(parser):\n \"\"\"\n Adds the URL endpoint argument to the specified parser.\n \"\"\"\n parser.add_argument(\"baseUrl\", help=\"The URL of the API endpoint\")\n\n\ndef addAccessionsArgument(parser):\n parser.add_argument(\n \"--accessions\", default=None,\n help=\"The accessions to search over\")\n\n\ndef addMd5ChecksumsArgument(parser):\n parser.add_argument(\n \"--md5checksums\", default=None,\n help=\"The md5checksums to search over\")\n\n\ndef addPageSizeArgument(parser):\n parser.add_argument(\n \"--pageSize\", \"-m\", default=100, type=int,\n help=\"The maximum number of results returned in one response.\")\n\n\ndef addDatasetIdsArgument(parser):\n parser.add_argument(\n \"--datasetIds\", default=None,\n help=\"The datasetIds to search over\")\n\n\ndef addNameArgument(parser):\n parser.add_argument(\n \"--name\", default=None,\n help=\"The name to search over\")\n\n\ndef setCommaSeparatedAttribute(request, args, attr):\n attribute = getattr(args, attr)\n if attribute is not None:\n setattr(request, attr, attribute.split(\",\"))\n\n\ndef client_main(parser=None):\n if parser is None:\n parser = argparse.ArgumentParser(\n description=\"GA4GH reference client\")\n # Add global 
options\n parser.add_argument('--verbose', '-v', action='count', default=0)\n parser.add_argument(\n \"--workarounds\", \"-w\", default='', help=\"The workarounds to use\")\n parser.add_argument(\n \"--key\", \"-k\", help=\"The auth key to use\")\n parser.add_argument(\n \"--minimalOutput\", \"-O\", default=False,\n help=\"Use minimal output; default False\",\n action='store_true')\n subparsers = parser.add_subparsers(title='subcommands')\n\n # help\n subparsers.add_parser(\n \"help\", description=\"ga4gh_client help\",\n help=\"show this help message and exit\")\n\n # benchmarking\n bmParser = subparsers.add_parser(\n \"benchmark\",\n description=\"Run simple benchmarks on the various methods\",\n help=\"Benchmark server performance\")\n bmParser.set_defaults(runner=BenchmarkRunner)\n addUrlArgument(bmParser)\n addVariantSearchOptions(bmParser)\n\n # variants/search\n vsParser = subparsers.add_parser(\n \"variants-search\",\n description=\"Search for variants\",\n help=\"Search for variants.\")\n vsParser.set_defaults(runner=SearchVariantsRunner)\n addUrlArgument(vsParser)\n addVariantSearchOptions(vsParser)\n\n # variantsets/search\n vssParser = subparsers.add_parser(\n \"variantsets-search\",\n description=\"Search for variantSets\",\n help=\"Search for variantSets.\")\n vssParser.set_defaults(runner=SearchVariantSetsRunner)\n addUrlArgument(vssParser)\n addPageSizeArgument(vssParser)\n addDatasetIdsArgument(vssParser)\n\n # referencesets/search\n rssParser = subparsers.add_parser(\n \"referencesets-search\",\n description=\"Search for referenceSets\",\n help=\"Search for referenceSets\")\n rssParser.set_defaults(runner=SearchReferenceSetsRunner)\n addUrlArgument(rssParser)\n addPageSizeArgument(rssParser)\n addAccessionsArgument(rssParser)\n addMd5ChecksumsArgument(rssParser)\n rssParser.add_argument(\n \"--assemblyId\",\n help=\"The assembly id to search over\")\n\n # references/search\n rsParser = subparsers.add_parser(\n \"references-search\",\n description=\"Search for references\",\n help=\"Search for references\")\n rsParser.set_defaults(runner=SearchReferencesRunner)\n addUrlArgument(rsParser)\n addPageSizeArgument(rsParser)\n addAccessionsArgument(rsParser)\n addMd5ChecksumsArgument(rsParser)\n\n # readgroupsets/search\n rgsParser = subparsers.add_parser(\n \"readgroupsets-search\",\n description=\"Search for readGroupSets\",\n help=\"Search for readGroupSets\")\n rgsParser.set_defaults(runner=SearchReadGroupSetsRunner)\n addUrlArgument(rgsParser)\n addPageSizeArgument(rgsParser)\n addDatasetIdsArgument(rgsParser)\n addNameArgument(rgsParser)\n\n # callsets/search\n csParser = subparsers.add_parser(\n \"callsets-search\",\n description=\"Search for callSets\",\n help=\"Search for callSets\")\n csParser.set_defaults(runner=SearchCallSetsRunner)\n addUrlArgument(csParser)\n addPageSizeArgument(csParser)\n addNameArgument(csParser)\n addVariantSetIdsArgument(csParser)\n\n # reads/search\n rParser = subparsers.add_parser(\n \"reads-search\",\n description=\"Search for reads\",\n help=\"Search for reads\")\n rParser.set_defaults(runner=SearchReadsRunner)\n addUrlArgument(rParser)\n addPageSizeArgument(rParser)\n addStartArgument(rParser)\n addEndArgument(rParser)\n rParser.add_argument(\n \"--readGroupIds\", default=None,\n help=\"The readGroupIds to search over\")\n rParser.add_argument(\n \"--referenceId\", default=None,\n help=\"The referenceId to search over\")\n rParser.add_argument(\n \"--referenceName\", default=None,\n help=\"The referenceName to search over\")\n\n # 
referencesets/{id}\n rsidParser = subparsers.add_parser(\n \"referencesets-get\",\n description=\"Get a referenceset\",\n help=\"Get a referenceset\")\n rsidParser.set_defaults(runner=GetReferenceSetRunner)\n addGetArguments(rsidParser)\n\n # references/{id}\n ridParser = subparsers.add_parser(\n \"references-get\",\n description=\"Get a reference\",\n help=\"Get a reference\")\n ridParser.set_defaults(runner=GetReferenceRunner)\n addGetArguments(ridParser)\n\n # referencesets/{id}/bases\n basesParser = subparsers.add_parser(\n \"references-list-bases\",\n description=\"List bases of a reference\",\n help=\"List bases of a reference\")\n basesParser.set_defaults(runner=ListReferenceBasesRunner)\n addGetArguments(basesParser)\n addStartArgument(basesParser)\n addEndArgument(basesParser)\n\n args = parser.parse_args()\n if \"runner\" not in args:\n parser.print_help()\n else:\n runner = args.runner(args)\n runner.run()\n", "sub_path": "ga4gh/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 19915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "ga4gh.datamodel.variants.WormtableVariantSet", "line_number": 54, "usage_type": "attribute"}, {"api_name": "ga4gh.datamodel.variants", "line_number": 54, "usage_type": "name"}, {"api_name": "ga4gh.datamodel.variants.TabixVariantSet", "line_number": 63, "usage_type": "attribute"}, {"api_name": "ga4gh.datamodel.variants", "line_number": 63, "usage_type": "name"}, {"api_name": "ga4gh.frontend.configure", "line_number": 69, "usage_type": "call"}, {"api_name": "ga4gh.frontend", "line_number": 69, "usage_type": "name"}, {"api_name": "ga4gh.frontend.app", "line_number": 70, "usage_type": "attribute"}, {"api_name": "ga4gh.frontend", "line_number": 70, "usage_type": "name"}, {"api_name": "ga4gh.backend.Backend", "line_number": 70, "usage_type": "call"}, {"api_name": "ga4gh.backend", "line_number": 70, "usage_type": "name"}, {"api_name": "ga4gh.frontend.app.run", "line_number": 72, "usage_type": "call"}, {"api_name": "ga4gh.frontend.app", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ga4gh.frontend", "line_number": 72, "usage_type": "name"}, {"api_name": "ga4gh.client.HttpClient", "line_number": 87, "usage_type": "call"}, {"api_name": "ga4gh.client", "line_number": 87, "usage_type": "name"}, {"api_name": "ga4gh.client.HttpClient", "line_number": 104, "usage_type": "call"}, {"api_name": "ga4gh.client", "line_number": 104, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchVariantSetsRequest", "line_number": 150, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 150, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchVariantsRequest", "line_number": 164, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 164, "usage_type": "name"}, {"api_name": "ga4gh.client.HttpClient", "line_number": 169, "usage_type": "attribute"}, {"api_name": "ga4gh.client", "line_number": 169, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchReferenceSetsRequest", "line_number": 213, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 213, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchReferencesRequest", "line_number": 228, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 228, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchReadGroupSetsRequest", "line_number": 243, 
"usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 243, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchCallSetsRequest", "line_number": 258, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 258, "usage_type": "name"}, {"api_name": "ga4gh.protocol.ProtocolElement", "line_number": 271, "usage_type": "attribute"}, {"api_name": "ga4gh.protocol", "line_number": 271, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GASearchReadsRequest", "line_number": 286, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 286, "usage_type": "name"}, {"api_name": "ga4gh.client.HttpClient", "line_number": 287, "usage_type": "attribute"}, {"api_name": "ga4gh.client", "line_number": 287, "usage_type": "name"}, {"api_name": "ga4gh.protocol.GAListReferenceBasesRequest", "line_number": 307, "usage_type": "call"}, {"api_name": "ga4gh.protocol", "line_number": 307, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 349, "usage_type": "call"}, {"api_name": "time.time", "line_number": 350, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 356, "usage_type": "call"}, {"api_name": "time.time", "line_number": 357, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 466, "usage_type": "call"}]} +{"seq_id": "336420798", "text": "import speech_recognition as sr\nimport json\nfrom pyautogui import typewrite, press\n\n\n\nrecog = sr.Recognizer()\nmic = sr.Microphone()\ndef getInput():\n voiceInput = \"\"\n \n with mic as source:\n audio = recog.listen(source)\n try:\n \n voiceInput = recog.recognize_google(audio)\n print(voiceInput)\n \n except sr.UnknownValueError:\n print(\"Miku no understand ;-;\")\n except sr.RequestError:\n print(\"Miku is break ;-;\")\n return voiceInput \n\ngetInput()\n \n", "sub_path": "mikucsharp/miku.py", "file_name": "miku.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 7, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 8, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "430756200", "text": "import cv2\nimport numpy as np\nimport os\nfrom pylsd import lsd\n\n#full_name = 'ntuWoUDpeSk.jpeg'\n#img = cv2.imread(full_name, cv2.IMREAD_COLOR)\n#img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#segments = lsd(img_gray, scale=0.8, sigma_scale=0.9, ang_th=22.5, quant=2.0, eps=150, density_th=0.7, n_bins=1024, max_grad=255.0)\n\n#for i in range(segments.shape[0]):\n# pt1 = (int(segments[i, 0]), int(segments[i, 1]))\n# pt2 = (int(segments[i, 2]), int(segments[i, 3]))\n# width = segments[i, 4]\n# cv2.line(img, pt1, pt2, (0, 0, 255), int(np.ceil(width)))\n\n#cv2.imwrite('output.jpg', img)\n\ncap = cv2.VideoCapture('WIN_20210103_20_39_07_Pro.mp4')\n\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n#, scale=0.8, sigma_scale=0.9, ang_th=22.5, quant=2.0, eps=150, density_th=0.7, n_bins=1024, max_grad=255.0\n segments = lsd(gray)\n\n for i in range(segments.shape[0]):\n pt1 = (int(segments[i, 0]), int(segments[i, 1]))\n pt2 = (int(segments[i, 2]), int(segments[i, 3]))\n width = segments[i, 4]\n cv2.line(frame, pt1, pt2, (0, 0, 255), 
int(np.ceil(width)))\n\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n", "sub_path": "CV/LineSegmentDetector/lsd.py", "file_name": "lsd.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.VideoCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pylsd.lsd", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "575219972", "text": "import ntplib\nimport time\nimport threading\n\ndef getNetTime():\n c = ntplib.NTPClient()\n response = c.request('uk.pool.ntp.org', version=3)\n return response.tx_time\n\ndef getRealTime():\n global currentTime\n return time.ctime(currentTime)\n\nclass clock(threading.Thread):\n global currentTime\n def __init__(self, name):\n threading.Thread.__init__(self)\n self.name = name\n def run(self):\n global currentTime\n currentTime = getNetTime()\n while True:\n time.sleep(1)\n currentTime = currentTime+1\n\ndef startClock():\n global currentTime\n currentTime = 0\n cThread = clock(clock)\n cThread.start()\n while currentTime == 0:\n time.sleep(0.1)\n", "sub_path": "realtime.py", "file_name": "realtime.py", "file_ext": "py", "file_size_in_byte": 728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "ntplib.NTPClient", "line_number": 6, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 12, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 14, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 17, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "182701399", "text": "import os\nimport numpy as np\nimport math as m\nfrom matplotlib import pyplot as plt\n\nax = plt.gca()\n\nDATA_DIR = \"square\"\n\nDATA = \"trajectory/\"\nFILE_NAME = DATA + \"out\"\n\nf = open(FILE_NAME, 'r')\nfor line in f.readlines():\n est = np.array(line.split(\" \")[:-1])\n print(est)\n est = est.astype(np.float)\n if((not np.isnan(est[0])) & (not np.isnan(est[1]))):\n plt.plot(est[0], est[1], 'b+')\n\nax.set_xlim((0, 35))\nax.set_ylim((-3.6, 3.6))\n# plt.axes().set_aspect('equal', 'datalim')\nplt.axes().set_aspect('equal')\nplt.grid()\nplt.show()", "sub_path": "software/algorithm/trajectory_analysis.py", "file_name": "trajectory_analysis.py", "file_ext": "py", "file_size_in_byte": 546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.gca", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.float", 
"line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "398799885", "text": "\"\"\"\nThis script contains the functions used for analysis of fluence distribution\n\"\"\"\n\nimport logging\nimport numpy as np\nfrom numba import njit # Make analysis go brrrrr\nfrom tqdm import tqdm # Show progress\n\n# Package imports\nfrom irrad_control.analysis.constants import elementary_charge\n\n\n# This is the main function\ndef generate_fluence_map(beam_data, scan_data, beam_sigma, bins=(100, 100)):\n \"\"\"\n Generates a two-dimensional fluence map of the entire scan area from irrad_control output data.\n \n Parameters\n ----------\n beam_data : np.array, pytables.Table\n Beam data of irradiation\n scan_data : np.array, pytables.Table\n Scan data of irradiation\n beam_sigma : tuple\n Beam sigma of the 2D Gaussian beam profile in mm\n bins : tuple, optional\n Binning of the generated fluence map, by default (100, 100)\n CAUTION: the binning is numpy shape, therefore bins are (Y, X)\n\n Returns\n -------\n tuple: (np.ndarray, np.ndarray, np.ndarray, np.ndarray)\n Tuple containing fluence map, fluence map error, bin_centers_x, bin_centers_y\n \"\"\"\n\n total_scans = np.max(scan_data['scan']) + 1\n total_rows = total_scans * scan_data['n_rows'][0]\n\n logging.info(f\"Generating fluence distribution from {total_scans} scans, containing {total_rows} total rows\")\n logging.info(\"Using Gaussian beam model with dimensions {}_x = {} mm, {}_y = {}mm\".format(u'\\u03c3', beam_sigma[0], u'\\u03c3', beam_sigma[1]))\n\n # Get number of rows; FIXME: get n_rows from *Irrad* data\n n_rows = np.max(scan_data['row']) + 1 # Rows start at 0\n \n # Get scan area; FIXME: get scan area from *Irrad* data\n # Everything in base unit mm\n scan_area_start = (scan_data[0]['row_start_x'], scan_data[n_rows]['row_start_y'])\n scan_area_end = (scan_data[0]['row_stop_x'], scan_data[0]['row_start_y'])\n\n # Fluence map\n fluence_map = np.zeros(shape=bins)\n fluence_map_error = np.zeros_like(fluence_map)\n\n # Create fluence map bin edge points\n map_bin_edges_y = np.linspace(0, abs(scan_area_start[1] - scan_area_end[1]), bins[0] + 1)\n map_bin_edges_x = np.linspace(0, abs(scan_area_end[0] - scan_area_start[0]), bins[1] + 1)\n \n # Create fluence map bin centers\n map_bin_centers_y = 0.5 * (map_bin_edges_y[:-1] + map_bin_edges_y[1:])\n map_bin_centers_x = 0.5 * (map_bin_edges_x[:-1] + map_bin_edges_x[1:])\n\n logging.info(f\"Initializing fluence map of ({map_bin_edges_x[-1]:.2f}x{map_bin_edges_y[-1]:.2f}) mm² scan area in {bins[1]}x{bins[0]} bins\")\n \n # Row bin times\n row_bin_transit_times = np.zeros_like(map_bin_centers_x)\n\n # Index that keeps track how far we have advanced trough the beam data\n current_row_idx = 0\n\n # Loop over scanned rows\n for row_data in tqdm(scan_data, desc='Generating fluence distribution', unit='rows'):\n\n 
current_row_idx = _process_row(row_data=row_data,\n beam_data=beam_data,\n fluence_map=fluence_map,\n fluence_map_error=fluence_map_error,\n row_bin_transit_times=row_bin_transit_times,\n map_bin_edges_x=map_bin_edges_x,\n map_bin_centers_x=map_bin_centers_x,\n map_bin_centers_y=map_bin_centers_y,\n beam_sigma=beam_sigma,\n scan_y_offset=scan_area_end[-1],\n current_row_idx=current_row_idx)\n\n logging.info(f\"Finished generating fluence distribution.\")\n \n # Take sqrt of error map squared\n fluence_map_error = np.sqrt(fluence_map_error) \n\n # Scale from protons / mm² (intrinsic unit) to protons / cm²\n fluence_map *= 100\n fluence_map_error *= 100\n\n return fluence_map, fluence_map_error, map_bin_centers_x, map_bin_centers_y\n\n\ndef extract_dut_map(fluence_map, map_bin_centers_x, map_bin_centers_y, dut_rectangle, center_symm=False):\n \"\"\"\n Extracts the DUT region from the fluence map.\n\n Parameters\n ----------\n fluence_map : 2D np.ndarray\n Fluence map\n map_bin_centers_x : np.ndarray\n Bin centers of the fluence map in x a.k.a scan direction\n map_bin_centers_y : np.ndarray\n Bin centers of the fluence map in y a.k.a row direction\n dut_rectangle : tuple\n Relative position of the DUT rectangle in the form of (x_min, y_min, x_max, y_max) or (x_width, y_width) if *center_symm* is True\n center_symm: bool\n If True, the *dut_rectangle* has the form of (x_width, y_width) and the exctracted map is centered symmetrically\n\n (0, 0)----------------------------------------------------------------------------- \n | Fluence map |\n | |\n | --- (x_min, y_min) ----------------------------- |\n | | | | |\n | | | | |\n | | y_width | DUT map | |\n | | | | |\n | | | | |\n | | | | |\n | --- ----------------------------- (x_max, y_max) |\n | |\n | |----------x_width----------| |\n ------------------------------------------------------------------------------\n Returns\n -------\n tuple\n (2D np.ndarray, 1D np.ndarray, 1D np.ndarray) -> (DUT_fluence_map, DUT_map_bins_x, DUT_map_bins_y)\n \"\"\"\n\n if center_symm and len(dut_rectangle) != 2:\n raise ValueError(\"*dut_rectangle* needs to be in the form of (x_width, y_width)\")\n \n if not center_symm and len(dut_rectangle) != 4:\n raise ValueError(\"*dut_rectangle needs to be in the form of (x_min, y_min, x_max, y_max)\")\n\n scan_area_x = map_bin_centers_x[-1] + (map_bin_centers_x[1] - map_bin_centers_x[0])/2.\n scan_area_y = map_bin_centers_y[-1] + (map_bin_centers_y[1] - map_bin_centers_y[0])/2.\n\n # Make map edges\n map_bin_edges_x = np.linspace(0, scan_area_x, len(map_bin_centers_x)+1)\n map_bin_edges_y = np.linspace(0, scan_area_y, len(map_bin_centers_y)+1)\n\n if center_symm:\n # Extract scan dimensions \n dut_rectangle = ((scan_area_x - dut_rectangle[0])/2., (scan_area_y - dut_rectangle[1])/2.,\n (scan_area_x + dut_rectangle[0])/2., (scan_area_y + dut_rectangle[1])/2.)\n\n x_min_idx, x_max_idx = np.searchsorted(map_bin_edges_x, dut_rectangle[0]), np.searchsorted(map_bin_edges_x, dut_rectangle[-2], side='right')\n y_min_idx, y_max_idx = np.searchsorted(map_bin_edges_y, dut_rectangle[1]), np.searchsorted(map_bin_edges_y, dut_rectangle[-1], side='right')\n\n return fluence_map[y_min_idx:y_max_idx, x_min_idx:x_max_idx], map_bin_centers_x[x_min_idx:x_max_idx], map_bin_centers_y[y_min_idx:y_max_idx]\n\n\n@njit\ndef gauss_2d_pdf(x, y, mu_x, mu_y, sigma_x, sigma_y, amplitude, normalized=False):\n \"\"\"\n 2D normal distribution PDF according to\n https://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function\n\n Parameters\n 
----------\n x : float\n Location along first dimension\n y : float\n Location along second dimension\n mu_x : float\n Mean of distribution in first dimension\n mu_y : float\n Mean of distribution in second dimension\n sigma_x : float\n Standard deviation in first dimension\n sigma_y : float\n Standard deviation in second dimension\n amplitude : float\n Amplitude of distribution; must be normalized for correct results e.g. integral(gauss_2D_pdf) == 1\n normalized : bool, optional\n Whether to normaliz amplitude, by default False\n\n Returns\n -------\n float\n Probability at given input\n \"\"\"\n # Amplitude; normalize if needed to satisfy integral(gauss_2D_pdf) == 1\n norm_amplitude = amplitude if normalized else gauss_2d_norm(amplitude=amplitude, sigma_x=sigma_x, sigma_y=sigma_y)\n\n # Exponent\n exponent = -0.5 * (np.square((x - mu_x) / sigma_x) + np.square((y - mu_y) / sigma_y))\n\n return norm_amplitude * np.exp(exponent)\n\n\n@njit\ndef gauss_2d_volume(amplitude, sigma_x, sigma_y):\n \"\"\"\n Volume under 2D Gaussian distribution according to\n https://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function\n\n Parameters\n ----------\n amplitude : float\n Amplitude of distribution; must be normalized for correct results e.g. integral(gauss_2D_pdf) == 1\n sigma_x : float\n Standard deviation in first dimension\n sigma_y : float\n Standard deviation in second dimension\n\n Returns\n -------\n float\n Volume under 2D Gaussian with given input parameters\n \"\"\"\n return 2 * np.pi * amplitude * sigma_x * sigma_y\n\n\n@njit\ndef gauss_2d_norm(amplitude, sigma_x, sigma_y):\n \"\"\"\n Calculate normalized amplitude to satisfy integral(gauss_2D_pdf) == 1\n \n Parameters\n ----------\n amplitude : float\n Amplitude of distribution to normalize\n sigma_x : float\n Standard deviation in first dimension\n sigma_y : float\n Standard deviation in second dimension\n\n Returns\n -------\n float\n Normalized amplitude\n \"\"\"\n return amplitude / (2 * np.pi * sigma_x * sigma_y)\n\n\n@njit\ndef apply_gauss_2d_kernel(map_2d, map_2d_error, amplitude, amplitude_error, bin_centers_x, bin_centers_y, mu_x, mu_y, sigma_x, sigma_y, normalized, skip_sigmas=6):\n \"\"\"\n Applies a 2D Gaussian kernel on *map_2d* and *map_2d_error*, along given bin centers in x and y dimension. See *gauss_2d_pdf* function\n for more info.\n\n Parameters\n ----------\n map_2d : np.ndarray\n Input map to apply kernel to which satisfies len(map_2d.shape)==2\n map_2d_error : np.ndarray\n Input error map to apply kernel to which satisfies len(map_2d.shape)==2\n amplitude : float\n Amplitude of distribution; must be normalized for correct results e.g. integral(gauss_2D_pdf) == 1\n amplitude_error : float\n Amplitude of error distribution; must be normalized for correct results e.g. integral(gauss_2D_pdf) == 1\n bin_centers_x : np.ndarray\n [description]\n bin_centers_y : np.ndarray\n [description]\n mu_x : float\n Mean of distribution in first dimension\n mu_y : float\n Mean of distribution in second dimension\n sigma_x : float\n Standard deviation in first dimension\n sigma_y : float\n Standard deviation in second dimension\n normalized : bool, optional\n Whether to normaliz amplitude, by default False\n skip_sigmas: float, int\n Skip calculation if point on *map_2d* is more tha this amountof sigmas away in respective dimension\n Decreasing this increases performance at the cost of accuracy. 
Minimum value is 3\n \"\"\"\n # Check\n if skip_sigmas < 3:\n raise ValueError(\"Minimum of skip_sigmas is 3 to maintain reasonable accuracy\")\n\n error_amplitude_squared = amplitude_error ** 2\n \n # Loop over y indices\n for j in range(map_2d.shape[0]):\n \n # Extract current y coordinate\n y_coord = bin_centers_y[j]\n \n # Check y coordinate\n if abs(y_coord - mu_y) > skip_sigmas * sigma_y:\n continue\n \n # Loop over x indices\n for i in range(map_2d.shape[1]):\n\n # Extract current x coordinate \n x_coord = bin_centers_x[i]\n\n # Check x coordinate\n if abs(x_coord - mu_x) > skip_sigmas * sigma_x:\n continue\n \n # Apply Gaussian to map\n map_2d[j, i] += gauss_2d_pdf(x=x_coord,\n y=y_coord,\n mu_x=mu_x,\n mu_y=mu_y,\n sigma_x=sigma_x,\n sigma_y=sigma_y,\n amplitude=amplitude,\n normalized=normalized)\n\n # Apply Gaussian to error map e.g. with squared amplitude\n map_2d_error[j, i] += gauss_2d_pdf(x=x_coord,\n y=y_coord,\n mu_x=mu_x,\n mu_y=mu_y,\n sigma_x=sigma_x,\n sigma_y=sigma_y,\n amplitude=error_amplitude_squared,\n normalized=normalized)\n\n\n@njit\ndef _calc_bin_transit_times(bin_transit_times, bin_edges, scan_speed, scan_accel):\n \"\"\"\n Calculate the time it takes to transit each bin in scan direction and fill array\n\n Parameters\n ----------\n bin_transit_times: np.ndarray\n Array to fill the row bin times into\n bin_edges: np.ndarray\n Array of bin edges of scan rows\n scan_speed: float\n Scan speed in mm/s\n scan_accel: float\n De/acceleration with which *scan_speed* is approached/reduced in mm/s^2\n \"\"\"\n\n # Calculate the size of each bin\n bin_sizes = bin_edges[1:] - bin_edges[:-1]\n\n # Hold current speed\n current_speed = 0\n\n # Time needed to accelerate / decelerate to / from *scan_speed* in seconds\n # v = a * t\n de_accel_time = scan_speed / scan_accel\n\n # Distance covered for de/acceleration\n # s = a/2 * t^2\n de_accel_dist = scan_accel / 2. 
* de_accel_time ** 2.\n\n # Get index up to / from which is accelerated / decelerated\n idx = np.searchsorted(bin_edges, de_accel_dist)\n\n # Calculate the row bin times for the constant bins\n bin_transit_times[idx:-idx] = bin_sizes[idx:-idx] / scan_speed\n\n # Calculate the row bin times for the acceleration / deceleration phase\n for i in range(idx):\n reverse_idx = -(i + 1)\n # Calculate time\n bin_transit_times[i] = ((2 * bin_sizes[i] * scan_accel + current_speed ** 2) ** 0.5 - current_speed) / scan_accel\n bin_transit_times[reverse_idx] = ((2 * bin_sizes[reverse_idx] * scan_accel + current_speed ** 2) ** 0.5 - current_speed) / scan_accel\n\n # Update speed\n current_speed += scan_accel * bin_transit_times[i]\n\n\n@njit\ndef _process_row_wait(row_data, wait_beam_data, fluence_map, fluence_map_error, map_bin_edges_x, map_bin_centers_x, map_bin_centers_y, beam_sigma, scan_y_offset):\n \"\"\"\n Processes the times where the beam is waiting on the periphery of the scan area or switches rows\n\n Parameters\n ----------\n row_data : numpy.ndarray\n Structured numpy array containing data of current row\n wait_beam_data : numpy.ndarray\n Beam data measured while waiting, in-between two rows\n fluence_map : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence distribution and is updated for this row\n fluence_map_error : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence error distribution and is updated for this row\n row_bin_transit_times : numpy.ndarray\n Flat numpy array which is used to hold the bin transit times for this row\n map_bin_edges_x : numpy.ndarray\n Flat numpy array holding the bin edges of the *fluence_map* in scan direction\n map_bin_centers_x : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in scan direction\n map_bin_centers_y : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in row direction\n beam_sigma : tuple, list, numpy.ndarray\n Iterable of beam sigmas with len(beam_sigma) == 2\n scan_y_offset : float\n Offset in mm which determines the relative 0 position in row direction: same as the y coordinate of row 0\n \"\"\"\n \n # Determine the mean of the beam\n wait_mu_x = map_bin_edges_x[-1 if row_data['row'] % 2 else 0]\n wait_mu_y = row_data['row_start_y'] - scan_y_offset\n\n # Add variation to the uncertainty\n wait_protons_std = np.std(wait_beam_data['beam_current'])\n \n # Loop over currents and apply Gauss kernel at given position\n for i in range(wait_beam_data.shape[0] - 1):\n\n # Get beam current measurement\n wait_current = wait_beam_data[i]['beam_current']\n wait_current_error = wait_beam_data[i]['beam_current_error']\n\n # Calculate how many seconds this current was present while waiting\n wait_interval = wait_beam_data[i+1]['timestamp'] - wait_beam_data[i]['timestamp']\n\n # Integrate over *wait_interval* to obtain number of protons induced\n wait_protons = wait_current * wait_interval / elementary_charge\n wait_protons_error = wait_current_error * wait_interval / elementary_charge\n wait_protons_error = (wait_protons_error**2 + wait_protons_std**2)**.5\n\n # Apply Gaussian kernel for protons\n apply_gauss_2d_kernel(map_2d=fluence_map,\n map_2d_error=fluence_map_error,\n amplitude=wait_protons,\n amplitude_error=wait_protons_error,\n bin_centers_x=map_bin_centers_x,\n bin_centers_y=map_bin_centers_y,\n mu_x=wait_mu_x,\n mu_y=wait_mu_y,\n sigma_x=beam_sigma[0],\n sigma_y=beam_sigma[1],\n normalized=False)\n\n\n@njit\ndef _process_row_scan(row_data, row_beam_data, 
fluence_map, fluence_map_error, row_bin_transit_times, map_bin_edges_x, map_bin_centers_x, map_bin_centers_y, beam_sigma, scan_y_offset):\n \"\"\"\n Processes the scanning of a single row.\n\n Parameters\n ----------\n row_data : numpy.ndarray\n Structured numpy array containing data of current row\n row_beam_data : numpy.ndarray\n Beam data measured during scanning of this row; used for interpolation\n fluence_map : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence distribution and is updated for this row\n fluence_map_error : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence error distribution and is updated for this row\n row_bin_transit_times : numpy.ndarray\n Flat numpy array which is used to hold the bin transit times for this row\n map_bin_edges_x : numpy.ndarray\n Flat numpy array holding the bin edges of the *fluence_map* in scan direction\n map_bin_centers_x : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in scan direction\n map_bin_centers_y : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in row direction\n beam_sigma : tuple, list, numpy.ndarray\n Iterable of beam sigmas with len(beam_sigma) == 2\n scan_y_offset : float\n Offset in mm which determines the relative 0 position in row direction: same as the y coordinate of row 0\n \"\"\"\n\n # Update row bin times\n _calc_bin_transit_times(bin_transit_times=row_bin_transit_times, bin_edges=map_bin_edges_x, scan_speed=row_data['row_scan_speed'], scan_accel=2500) # FIXME: get accel from Irrad data\n\n # Determine communication timing overhead; assume symmetric dead time at row start and end\n row_start_overhead = (row_data['row_stop_timestamp'] - row_data['row_start_timestamp'] - row_bin_transit_times.sum()) / 2.0\n \n # Get the timestamp from which to check for beam currents, adjusted by the overhead\n actual_row_start_timestamp = row_data['row_start_timestamp'] + row_start_overhead\n\n # Calculate the timstamps which correspond to being in the map_bin_centers_x \n row_bin_center_timestamps = actual_row_start_timestamp + np.cumsum(row_bin_transit_times) - row_bin_transit_times / 2.0\n \n # Interpolate the beam current measurements at the bin center for this scan\n row_bin_center_currents = np.interp(row_bin_center_timestamps, row_beam_data['timestamp'], row_beam_data['beam_current'])\n row_bin_center_current_errors = np.interp(row_bin_center_timestamps, row_beam_data['timestamp'], row_beam_data['beam_current_error'])\n\n # Integrate the current measurements with the times spent in each bin to calculate the amount of protons in the bin\n row_bin_center_protons = (row_bin_center_currents * row_bin_transit_times) / elementary_charge\n row_bin_center_proton_errors = (row_bin_center_current_errors * row_bin_transit_times) / elementary_charge\n row_bin_center_proton_errors = (row_bin_center_proton_errors**2 + np.std(row_bin_center_protons)**2)**.5\n\n # Loop over row times\n for i in range(row_bin_center_protons.shape[0]):\n \n # Update mean location of the distribution\n mu_x = map_bin_centers_x[(-(i+1) if row_data['row'] % 2 else i)]\n mu_y = row_data['row_start_y'] - scan_y_offset\n \n # Apply Gaussian kernel for protons\n apply_gauss_2d_kernel(map_2d=fluence_map,\n map_2d_error=fluence_map_error,\n amplitude=row_bin_center_protons[i],\n amplitude_error=row_bin_center_proton_errors[i],\n bin_centers_x=map_bin_centers_x,\n bin_centers_y=map_bin_centers_y,\n mu_x=mu_x,\n mu_y=mu_y,\n sigma_x=beam_sigma[0],\n sigma_y=beam_sigma[1],\n 
normalized=False)\n\n\n@njit\ndef _process_row(row_data, beam_data, fluence_map, fluence_map_error, row_bin_transit_times, map_bin_edges_x, map_bin_centers_x, map_bin_centers_y, beam_sigma, scan_y_offset, current_row_idx):\n \"\"\"\n Process the scanning and waiting / switching of a single row\n\n Parameters\n ----------\n row_data : numpy.ndarray\n Structured numpy array containing data of current row\n beam_data : numpy.ndarray, tables.Table\n Complete beam data which is sliced using *current_row_idx*\n fluence_map : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence distribution and is updated for this row\n fluence_map_error : numpy.ndarray\n Two-dimensional numpy.ndarray which holds the fluence error distribution and is updated for this row\n row_bin_transit_times : numpy.ndarray\n Flat numpy array which is used to hold the bin transit times for this row\n map_bin_edges_x : numpy.ndarray\n Flat numpy array holding the bin edges of the *fluence_map* in scan direction\n map_bin_centers_x : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in scan direction\n map_bin_centers_y : numpy.ndarray\n Flat numpy array holding the bin centers of the *fluence_map* in row direction\n beam_sigma : tuple, list, numpy.ndarray\n Iterable of beam sigmas with len(beam_sigma) == 2\n scan_y_offset : float\n Offset in mm which determines the relative 0 position in row direction: same as the y coordinate of row 0\n current_row_idx : int\n Integer corresponding to the index of beam data which has already been processed.\n Allows slicing beam data for (minimal) speed-up instead of always searching entire beam data (np.searchsorted is very, very fast)\n\n Returns\n -------\n int\n Index up to which beam data has been processed: used for slicing in next call of this function\n \"\"\"\n\n # Advance slice of beam data which is relevant for this row\n current_beam_data = beam_data[current_row_idx:]\n\n # Get indice limits of beam currents measured during scanning of current row\n row_start_idx = np.searchsorted(current_beam_data['timestamp'], row_data['row_start_timestamp'], side='left')\n row_stop_idx = np.searchsorted(current_beam_data['timestamp'], row_data['row_stop_timestamp'], side='right')\n\n # Get beam data current measurements and corresponding timestamps of this row scan\n row_beam_data = current_beam_data[row_start_idx:row_stop_idx]\n \n # If this is not the first row, we want to process the waiting / switching row\n if current_row_idx > 0:\n \n # Get beam current measurements which were taken while waiting to start next row\n wait_beam_data = current_beam_data[:row_start_idx]\n \n # Process the currents measured while waiting\n _process_row_wait(row_data=row_data,\n wait_beam_data=wait_beam_data,\n fluence_map=fluence_map,\n fluence_map_error=fluence_map_error,\n map_bin_edges_x=map_bin_edges_x,\n map_bin_centers_x=map_bin_centers_x,\n map_bin_centers_y=map_bin_centers_y,\n beam_sigma=beam_sigma,\n scan_y_offset=scan_y_offset)\n\n # Process the scan\n _process_row_scan(row_data=row_data,\n row_beam_data=row_beam_data,\n fluence_map=fluence_map,\n fluence_map_error=fluence_map_error,\n row_bin_transit_times=row_bin_transit_times,\n map_bin_edges_x=map_bin_edges_x,\n map_bin_centers_x=map_bin_centers_x,\n map_bin_centers_y=map_bin_centers_y,\n beam_sigma=beam_sigma,\n scan_y_offset=scan_y_offset)\n \n # Calculate index to return\n return current_row_idx + row_stop_idx\n", "sub_path": "irrad_control/analysis/fluence.py", "file_name": "fluence.py", 
"file_ext": "py", "file_size_in_byte": 26278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.max", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 66, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 195, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numba.njit", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numba.njit", "line_number": 221, "usage_type": "name"}, {"api_name": "numba.njit", "line_number": 243, "usage_type": "name"}, {"api_name": "numpy.searchsorted", "line_number": 356, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 324, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 406, "usage_type": "call"}, {"api_name": "irrad_control.analysis.constants.elementary_charge", "line_number": 419, "usage_type": "name"}, {"api_name": "irrad_control.analysis.constants.elementary_charge", "line_number": 420, "usage_type": "name"}, {"api_name": "numba.njit", "line_number": 372, "usage_type": "name"}, {"api_name": "numpy.cumsum", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 480, "usage_type": "call"}, {"api_name": "irrad_control.analysis.constants.elementary_charge", "line_number": 483, "usage_type": "name"}, {"api_name": "irrad_control.analysis.constants.elementary_charge", "line_number": 484, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 485, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 437, "usage_type": "name"}, {"api_name": "numpy.searchsorted", "line_number": 549, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 550, "usage_type": "call"}, {"api_name": "numba.njit", "line_number": 508, "usage_type": "name"}]} +{"seq_id": "2297476", "text": "from __future__ import absolute_import\n\nfrom typing import TypeVar, Type, Optional, Any # noqa\n\nfrom odin.exceptions import ValidationError\nfrom . 
import Field\n\ntry:\n from enum import Enum\nexcept ImportError:\n Enum = None\n\n_all_fields = []\n\nif Enum:\n _all_fields.append('EnumField')\n\n ET = TypeVar('ET', Enum, Enum)\n\n class EnumField(Field):\n \"\"\"\n Field for handling Python enums.\n\n This field requires Python >= 3.4 or the enum34 package.\n\n \"\"\"\n def __init__(self, enum, **options):\n # type: (Type[Enum]) -> None\n options['choices'] = None\n super(EnumField, self).__init__(**options)\n self.enum = enum\n\n def to_python(self, value):\n # type: (Any) -> Optional[ET]\n if value is None:\n return\n\n # Attempt to convert\n try:\n return self.enum(value)\n except ValueError:\n raise ValidationError(self.error_messages['invalid_choice'] % value)\n\n def prepare(self, value):\n # type: (Optional[Enum]) -> str\n if value in self.enum:\n return value.value\n\n\n__all__ = tuple(_all_fields)\n", "sub_path": "odin/fields/future.py", "file_name": "future.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "enum.Enum", "line_number": 11, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 18, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 18, "usage_type": "argument"}, {"api_name": "odin.exceptions.ValidationError", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "251839716", "text": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Registration(models.Model):\n email = models.EmailField(_('email'), max_length=254, blank=True)\n age = models.IntegerField(_('age'), null=True, blank=True)\n city = models.CharField(_('city'), max_length=255, blank=True)\n country = models.CharField(_('country'), max_length=255, blank=True)\n comment = models.TextField(_('comment'), blank=True)\n likes = models.BooleanField(_('likes'))\n has_cat = models.BooleanField(_('has cat'))\n would_buy = models.BooleanField(_('would buy'))\n ip = models.GenericIPAddressField(null=True, blank=True)\n created = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name = _('registration')\n verbose_name_plural = _('registrations')\n", "sub_path": "base/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.GenericIPAddressField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 18, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "611508890", "text": "# Program to analyze sentiments from Twitter tweets : \\\n# https://www.digitalocean.com/community/tutorials/how-to-perform-sentiment-analysis-in-python-3-using-the-natural-language-toolkit-nltk\n\n# Use Twitter API to get data (future work)\n\n\n# Currently uses data from the nltk's twitter_samples\nfrom nltk.corpus import twitter_samples\nfrom nltk.tag import pos_tag\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport re,string,random\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk import classify\nfrom nltk import NaiveBayesClassifier\nfrom nltk.tokenize import word_tokenize\n\nstop_words = stopwords.words('english')\n\n\ndef remove_noise(tweet_tokens, stop_words):\n\n cleaned_tokens = []\n\n for token, tag in pos_tag(tweet_tokens):\n token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|'\\\n '(?:%[0-9a-fA-F][0-9a-fA-F]))+','', token)\n token = re.sub(\"(@[A-Za-z0-9_]+)\",\"\", token)\n\n if tag.startswith(\"NN\"):\n pos = 'n'\n elif tag.startswith('VB'):\n pos = 'v'\n else:\n pos = 'a'\n\n lemmatizer = WordNetLemmatizer()\n token = lemmatizer.lemmatize(token, pos)\n\n if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:\n cleaned_tokens.append(token.lower())\n\n return cleaned_tokens\n\n\n# generator to get the list of all cleaned tokens\ndef get_all_words(cleaned_tokens_list):\n for tokens in cleaned_tokens_list:\n for token in tokens:\n yield token\n\n\n# generator to convert the tweets to dictionaries to pass thru the Naive bayes classifier model\ndef get_tweets_for_model(cleaned_tokens_list):\n for tweet_tokens in cleaned_tokens_list:\n yield dict([token, True] for token in tweet_tokens)\n\n\npositive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')\nnegative_tweet_tokens = 
twitter_samples.tokenized('negative_tweets.json')\n\npositive_cleaned_tokens_list = []\nnegative_cleaned_tokens_list = []\n\nfor tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(remove_noise(tokens, stop_words))\n\nfor tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(remove_noise(tokens, stop_words))\n\n# Just a test: get a list of all the tokens in the positive generated lists\nall_pos_words = get_all_words(positive_cleaned_tokens_list)\nfreq_dist_pos = FreqDist(all_pos_words)\n# -- end of test\n\n# Get the dictionaries\npositive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list)\nnegative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list)\n\n# split the dataset into training and test data\npositive_dataset = [(tweet_dict, \"Positive\") for tweet_dict in positive_tokens_for_model]\nnegative_dataset = [(tweet_dict, \"Negative\") for tweet_dict in negative_tokens_for_model]\n\ndataset = positive_dataset + negative_dataset\n\nrandom.shuffle(dataset) # randomize the dataset to remove any bias\n\n# Split training data : test data to be 70% : 30% of data set\ntrain_data = dataset[0:7000]\ntest_data = dataset[7000:]\n\nclassifier = NaiveBayesClassifier.train(train_data)\n\n# print(\"Accuracy is:\", classify.accuracy(classifier, test_data))\n#\n# print(classifier.show_most_informative_features(10))\n\n# testing the model\ncustom_tweet = \"I ordered just once from TerribleCo, they screwed up, never used the app again.\"\n\ncustom_tokens = remove_noise(word_tokenize(custom_tweet), stop_words)\n# print(custom_tokens)\nprint(classifier.classify(dict([token, True] for token in custom_tokens)))\n\n\n", "sub_path": "sentimentAnalysis.py", "file_name": "sentimentAnalysis.py", "file_ext": "py", "file_size_in_byte": 3554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 18, "usage_type": "name"}, {"api_name": "nltk.tag.pos_tag", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 26, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 28, "usage_type": "call"}, {"api_name": "nltk.stem.wordnet.WordNetLemmatizer", "line_number": 37, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 40, "usage_type": "attribute"}, {"api_name": "nltk.corpus.twitter_samples.tokenized", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.corpus.twitter_samples", "line_number": 59, "usage_type": "name"}, {"api_name": "nltk.corpus.twitter_samples.tokenized", "line_number": 60, "usage_type": "call"}, {"api_name": "nltk.corpus.twitter_samples", "line_number": 60, "usage_type": "name"}, {"api_name": "nltk.FreqDist", "line_number": 73, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 86, "usage_type": "call"}, {"api_name": "nltk.NaiveBayesClassifier.train", "line_number": 92, "usage_type": "call"}, {"api_name": "nltk.NaiveBayesClassifier", "line_number": 92, "usage_type": "name"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "68990212", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 23 15:36:03 2020\n\n@author: REZA\n\"\"\"\n\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import 
Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.applications.resnet50 import ResNet50 \nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom time import strftime\nimport seaborn as sns\nplt.style.use('seaborn-poster')\n\n\n\ndef Build_ResNet50(trainX, trainY,testX, testY,trainAug,labels,classes):\n \n\n INIT_LR = 1e-3\n EPOCHS = 10\n BS = 8\n \n \n \n baseModel = ResNet50(weights=\"imagenet\", include_top=False,input_tensor=Input(shape=(224, 224, 3)))\n \n headModel = baseModel.output\n headModel = AveragePooling2D(pool_size=(4, 4))(headModel)\n headModel = Flatten(name=\"flatten\")(headModel)\n headModel = Dense(128, activation=\"relu\")(headModel)\n headModel = Dropout(0.5)(headModel)\n headModel = Dense(2, activation=\"softmax\")(headModel)\n \n \n model_ResNet50 = Model(inputs=baseModel.input, outputs=headModel)\n\n \n for layer in baseModel.layers:\n layer.trainable = False\n \n # compile our model\n print(\"[INFO] compiling model...\")\n opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n model_ResNet50.compile(loss=\"binary_crossentropy\", optimizer=opt,metrics=[\"accuracy\"])\n \n print(\"[INFO] training head...\")\n HistoryResNet50 = model_ResNet50.fit_generator(\n trainAug.flow(trainX, trainY, batch_size=BS),\n steps_per_epoch=len(trainX) // BS,\n validation_data=(testX, testY),\n validation_steps=len(testX) // BS,\n epochs=EPOCHS)\n \n \n print(\"[INFO] evaluating network...\")\n predIdxs_model_ResNet50 = model_ResNet50.predict(testX, batch_size=BS)\n \n predIdxs_model_ResNet50 = np.argmax(predIdxs_model_ResNet50, axis=1)\n \n \n cm = confusion_matrix(testY.argmax(axis=1), predIdxs_model_ResNet50)\n sns.heatmap(cm.T,square=True,annot=True,fmt='d',cbar=False,\n xticklabels=['Normal','Covid19'],yticklabels=['Normal','Covid19'] )\n \n print(classification_report(testY.argmax(axis=1), predIdxs_model_ResNet50))\n\n \n total = sum(sum(cm))\n acc = (cm[0, 0] + cm[1, 1]) / total\n sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])\n specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])\n \n print(cm)\n print(\"acc: {:.4f}\".format(acc))\n print(\"sensitivity: {:.4f}\".format(sensitivity))\n print(\"specificity: {:.4f}\".format(specificity))\n \n print(\"[INFO] saving COVID-19 detector model...\")\n model_ResNet50.save(\"./models/\"+strftime(\"5.25.resnet.h5\",), save_format=\"h5\")\n plt.figure()\n plt.plot(HistoryResNet50.history['accuracy'])\n plt.plot(HistoryResNet50.history['val_accuracy'])\n plt.plot(HistoryResNet50.history['loss'])\n plt.plot(HistoryResNet50.history['val_loss'])\n plt.title('model accuracy')\n plt.ylabel('accuracy / loss')\n plt.xlabel('epoch')\n plt.legend(['accuracy', 'Validation accuracy','loss','Validation loss'])\n plt.show()\n return HistoryResNet50,predIdxs_model_ResNet50", "sub_path": "Train/Resnet50Model.py", "file_name": "Resnet50Model.py", "file_ext": "py", "file_size_in_byte": 3346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "tensorflow.keras.applications.resnet50.ResNet50", "line_number": 35, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers.Input", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.AveragePooling2D", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 71, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 75, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "56705164", "text": "from setuptools import setup, find_packages\n\n\ninstall_required = [l.strip() for l in open(\"requirements.txt\", \"r\")]\n\n\nmetadata = {'name': 'squirrel',\n 'version': '0.1',\n 'packages': find_packages(),\n 'author': 'shonenada',\n 'author_email': 'shonenada@gmail.com',\n 'url': \"https://github.com/shonenada/squirrel\",\n 'zip_safe': False,\n 'platforms': ['all'],\n 'package_data': {\"\": ['*.html', '*.jpg', '*.png', '*.css', '*.js',\n '*.ico', '*.coffee', '*.less', '*.stylus']},\n 'install_requires': install_required,\n 'description': 'a simple blog system.'}\n\n\nif __name__ == '__main__':\n setup(**metadata)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", 
"api": [{"api_name": "setuptools.find_packages", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "100867371", "text": "import importlib\nimport json\nimport logging\n\nimport pygame\n\nfrom dotmap import DotMap\nfrom StateMachine.GameStateMachine import GameStateMachine\nfrom pyqtree import Index\nfrom CollisionHandler import CollisionHandler\n\nclass Game:\n def __init__(self):\n self.gameObjects = []\n self.collidableObjects = []\n self.player = None\n self.stateMachine = GameStateMachine()\n\n # Initial Game State - Load array of game objects\n with open(\"Levels/Level1.json\") as jsonFile:\n levelData = json.load(jsonFile)\n for o in levelData[\"gameObjects\"]:\n # print('Class: ' + o['gameObjectClass'])\n do = DotMap(o)\n # print('Class: ' + do.gameObjectClass)\n module = importlib.import_module(do.gameObjectModule)\n class_ = getattr(module, do.gameObjectClass)\n\n no = class_(module.jsonMap(do)) # New Object from DotMapped Object\n if class_.__name__ == \"Player\":\n self.player = no\n # else:\n self.gameObjects.append(no)\n\n self.collidableObjects = list(filter(lambda o: o.collidable == True, self.gameObjects))\n\n # Player Actions Available (type == Key Up or Down)\n def action_left(self, type):\n # Overworld\n self.player.toggle_movement(\"left\")\n\n def action_right(self, type):\n self.player.toggle_movement(\"right\")\n\n def action_up(self, type):\n self.player.toggle_movement(\"up\")\n\n def action_down(self, type):\n self.player.toggle_movement(\"down\")\n\n def update(self):\n collisionIndex = Index((0, 0, 800, 600))\n for o in self.gameObjects:\n o.update()\n if o.collidable:\n collisionIndex.insert(o, (o.bounds.left, o.bounds.top, o.bounds.right, o.bounds.bottom))\n \n for i in self.collidableObjects:\n collisions = collisionIndex.intersect((i.bounds.left, i.bounds.top, i.bounds.right, i.bounds.bottom))\n \n if(len(collisions) > 1): # Intersecting more than self\n # logging.info(\"Collision!\")\n CollisionHandler(collisions)", "sub_path": "Game.py", "file_name": "Game.py", "file_ext": "py", "file_size_in_byte": 2195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "StateMachine.GameStateMachine.GameStateMachine", "line_number": 17, "usage_type": "call"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "dotmap.DotMap", "line_number": 24, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 26, "usage_type": "call"}, {"api_name": "pyqtree.Index", "line_number": 52, "usage_type": "call"}, {"api_name": "CollisionHandler.CollisionHandler", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "273986205", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom api.models import Videos\nfrom api.serializers import VideosSerializer\nimport requests\nimport json\n\nclass Command(BaseCommand):\n help = 'Gets the new links shared in slackarchive music channel'\n\n def handle(self, *args, **options):\n #self.get_latest_messages()\n self.update_from_json_file()\n\n def get_latest_messages(self, **args):\n # These values are hardcoded for dev-s team and their music channel\n headers = {'referer': 'https://dev-s.slackarchive.io/ama/page-1'}\n url = \"https://api.slackarchive.io/v1/messages?size=500&team=T06VBQ8SV&channel=C0DT79Y86\"\n self.stdout.write(\"Trying to get latest 500 messages from dev-s music channel\", ending='')\n response = 
requests.get(url, headers=headers)\n if response.status_code == requests.codes.ok:\n response_json = response.json()\n self.extract_youtube_links(response_json['messages'])\n else:\n self.stdout.write(\"Trying to get latest 500 messages from dev-s music channel\", ending='')\n self.stdout.write(response.text)\n\n def extract_youtube_links(self, messages):\n #self.stdout.write(\"messages are \" + str(messages), ending='')\n for message in messages:\n text = message['text']\n if text.startswith('<') and text.endswith('>'):\n text = text[1:-1]\n self.update_videos_model(text)\n\n def update_videos_model(self, url):\n # this is very rudimentary way of parsing youtube urls\n # need a better mechanism, to support different types as well\n url_list = url.split('?v=')\n if len(url_list) == 2:\n id = url_list[1]\n # if we don't have this video in model already, update the model\n if not Videos.objects.filter(youtube_id=id):\n video = Videos(youtube_url=url,youtube_id=id)\n video.save()\n else:\n self.stdout.write(\"passed url was\" + str(url), ending='')\n\n def update_from_json_file(self):\n temp_file_name = '/Users/mani.mishra/dev-hackers-slack-archive/json-dumps/music-total.json'\n with open(temp_file_name) as messages:\n content = messages.read()\n messages_dict = json.loads(content)\n messages_list = messages_dict['messages']\n self.extract_youtube_links(messages_list)\n", "sub_path": "slacktube/api/management/commands/scrape.py", "file_name": "scrape.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 20, "usage_type": "attribute"}, {"api_name": "api.models.Videos.objects.filter", "line_number": 42, "usage_type": "call"}, {"api_name": "api.models.Videos.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "api.models.Videos", "line_number": 42, "usage_type": "name"}, {"api_name": "api.models.Videos", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "245848371", "text": "import asyncio\nimport hashlib\nimport hmac\nimport re\nfrom logging import getLogger\nfrom time import time\nfrom urllib.parse import urlencode\n\nfrom exchanges.base import BaseApi, Order\nfrom exchanges.exceptions import BaseExchangeException\n\n\nclass LiquiApiException(BaseExchangeException):\n pass\n\n\nclass NoOrdersException(BaseExchangeException):\n pass\n\n\nclass LiquiApi(BaseApi):\n name = 'liqui'\n api_id = 1\n url = 'https://liqui.io/'\n api_regex = re.compile(r'\\w{8}-\\w{8}-\\w{8}-\\w{8}-\\w{8}') # A1B2C3D4-A1B2C3D4-A1B2C3D4-A1B2C3D4-A1B2C3D4\n secret_regex = re.compile(r'\\w{64}') # a78ab8f2410498e696cc6719134c62d5a852eb26070a31cb6a469b5932bf376b\n\n attempts_limit = 5\n\n async def _tapi(self, **params):\n attempt, delay = 1, 1\n while True:\n try:\n params['nonce'] = int(time())\n data = await self.post(\n 'https://api.liqui.io/tapi',\n headers={'Key': self._key, 'Sign': self._sign(params)},\n data=params\n )\n if 'error' in data:\n if data['error'] == 'no orders':\n raise NoOrdersException(data['error'])\n raise LiquiApiException(data['error'])\n return data.get('return', data)\n except LiquiApiException as e:\n getLogger().error(f'attempt {attempt}/{self.attempts_limit}, next attempt in {delay} 
seconds')\n getLogger().exception(e)\n attempt += 1\n if attempt > self.attempts_limit:\n return {}\n await asyncio.sleep(delay)\n delay *= 2\n\n def _sign(self, data):\n if isinstance(data, dict):\n data = urlencode(data)\n return hmac.new(self._secret.encode(), data.encode(), hashlib.sha512).hexdigest()\n\n async def active_orders(self) -> [Order, ]:\n try:\n api_orders = await self._tapi(method='ActiveOrders', pair='')\n orders = []\n for order_id, order in api_orders.items():\n order = Order(\n self.api_id,\n order_id,\n order['type'],\n '-'.join(cur.upper() for cur in order['pair'].split('_')),\n order['rate'],\n order['amount'],\n order['status'] > 0,\n )\n orders.append(order)\n return orders\n except NoOrdersException:\n return []\n\n async def order_info(self, order_id: str) -> Order:\n order = (await self._tapi(method='OrderInfo', order_id=order_id))[order_id]\n return Order(\n self.api_id,\n order_id,\n order['type'],\n '-'.join(cur.upper() for cur in order['pair'].split('_')),\n order['rate'],\n order['start_amount'],\n order['status'] > 0,\n )\n", "sub_path": "exchanges/liqui.py", "file_name": "liqui.py", "file_ext": "py", "file_size_in_byte": 2961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "exchanges.exceptions.BaseExchangeException", "line_number": 13, "usage_type": "name"}, {"api_name": "exchanges.exceptions.BaseExchangeException", "line_number": 17, "usage_type": "name"}, {"api_name": "exchanges.base.BaseApi", "line_number": 21, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 47, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 56, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 57, "usage_type": "call"}, {"api_name": "hashlib.sha512", "line_number": 57, "usage_type": "attribute"}, {"api_name": "exchanges.base.Order", "line_number": 64, "usage_type": "call"}, {"api_name": "exchanges.base.Order", "line_number": 59, "usage_type": "name"}, {"api_name": "exchanges.base.Order", "line_number": 80, "usage_type": "call"}, {"api_name": "exchanges.base.Order", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "264882933", "text": "# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2014 SF Soluciones.\n# (http://www.sfsoluciones.com)\n# contacto@sfsoluciones.com\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom osv import osv, fields\n\nfrom openerp.tools import float_compare\nfrom tools.translate import _\n\nclass sale_order_line(osv.osv):\n _inherit = 'sale.order.line'\n \n def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,\n uom=False, qty_uos=0, uos=False, name='', partner_id=False,\n lang=False, update_tax=True, date_order=False, packaging=False,\n fiscal_position=False, flag=False, context=None):\n product_pool = self.pool.get('product.product')\n product_uom_obj = self.pool.get('product.uom')\n res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty, uom, qty_uos,\n uos, name, partner_id, lang, update_tax, date_order,\n packaging, fiscal_position, flag, context=context)\n warning = {}\n if product:\n product_obj = product_pool.browse(cr, uid, product, context=context)\n uom2 = False\n if uom:\n uom2 = product_uom_obj.browse(cr, uid, uom)\n if product_obj.uom_id.category_id.id != uom2.category_id.id:\n uom = False\n if not uom2:\n uom2 = product_obj.uom_id\n res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)\n warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''\n compare_qty = float_compare(product_obj.qty_available * uom2.factor, qty * product_obj.uom_id.factor, precision_rounding=product_obj.uom_id.rounding)\n if (product_obj.type=='product') and int(compare_qty) == -1 \\\n and (product_obj.procure_method=='make_to_stock'):\n warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\\nThe real stock is %.2f %s. (without reservations)') % \\\n (qty, uom2 and uom2.name or product_obj.uom_id.name,\n max(0,product_obj.qty_available), product_obj.uom_id.name,\n max(0,product_obj.qty_available), product_obj.uom_id.name)\n warning_msgs += _(\"Not enough stock ! 
: \") + warn_msg + \"\\n\\n\"\n if warning_msgs:\n warning = {\n 'title': _('Configuration Error!'),\n 'message' : warning_msgs\n }\n res.update({'warning': warning})\n return res\n \nsale_order_line()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n", "sub_path": "sfs_sale_customization/sale.py", "file_name": "sale.py", "file_ext": "py", "file_size_in_byte": 3645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "osv.osv.osv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "osv.osv", "line_number": 28, "usage_type": "name"}, {"api_name": "openerp.tools.float_compare", "line_number": 52, "usage_type": "call"}, {"api_name": "tools.translate._", "line_number": 55, "usage_type": "call"}, {"api_name": "tools.translate._", "line_number": 59, "usage_type": "call"}, {"api_name": "tools.translate._", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "626263588", "text": "#!/usr/bin/env python\n\nimport sys\nimport os\nimport shutil\nimport math\nimport numpy as np\nimport argparse\nimport contextlib\nimport itertools\nimport signal\nimport subprocess\nimport tempfile\nimport threading\n\ntry:\n import queue # Python 3\nexcept ImportError:\n import Queue as queue # Python 2\n\nsys.dont_write_bytecode = True\n\nimport database_tool\n\n# Character short-cuts and global constants\nif os.name == 'nt':\n div = '\\\\'\nelse:\n div = '/'\n\nlb = '\\n'\n\nlb1 = lb\nlb2 = lb * 2\nlb3 = lb * 3\n\ndetection_ext = \"_detections.csv\"\ntrack_ext = \"_tracks.csv\"\n\ndefault_pipeline = \"pipelines\" + div + \"index_default.pipe\"\nno_pipeline = \"none\"\n\n# Global flag to see if any video has successfully completed processing\nany_video_complete = False\n\n# Helper class to list files with a given extension in a directory\ndef list_files_in_dir( folder ):\n if not os.path.exists( folder ) and os.path.exists( folder + \".lnk\" ):\n folder = folder + \".lnk\"\n folder = folder if not os.path.islink( folder ) else os.readlink( folder )\n if not os.path.isdir( folder ):\n exit_with_error( \"Input folder \\\"\" + folder + \"\\\" does not exist\" )\n return [\n os.path.join( folder, f ) for f in sorted( os.listdir( folder ) )\n if not f.startswith('.')\n ]\n\ndef list_files_in_dir_w_ext( folder, extension ):\n return [ f for f in list_files_in_dir( folder ) if f.endswith( extension ) ]\n\ndef has_valid_ext( f, ext_list ):\n for ext in ext_list:\n if f.lower().endswith( ext ):\n return True\n return False\n\ndef list_files_in_dir_w_exts( folder, extensions ):\n ext_list = extensions.split(\";\")\n return [ f for f in list_files_in_dir( folder ) if has_valid_ext( f, ext_list ) ]\n\ndef list_videos_in_dir( folder, extensions ):\n files = list_files_in_dir_w_exts( folder, extensions )\n if len( files ) == 0:\n files = [ f for f in list_files_in_dir( folder ) if os.path.isdir( f ) ]\n if len( files ) == 0:\n files = list_files_in_dir( folder )\n return files\n\n# Default message logging\ndef log_info( msg ):\n sys.stdout.write( msg )\n sys.stdout.flush()\n\n# Create a directory if it doesn't exist\ndef create_dir( dirname, logging=True, recreate=False, prompt=True ):\n if dirname == '.' 
or dirname == \"\":\n return\n if recreate:\n if os.path.exists( dirname ):\n if not prompt or database_tool.query_yes_no( lb1 + \"Reset folder: \" + dirname + \"?\" ):\n if logging:\n log_info( \"Removing \" + dirname + lb )\n shutil.rmtree( dirname )\n elif prompt:\n sys.exit(0)\n else:\n log_info( lb )\n if not os.path.exists( dirname ):\n if logging:\n log_info( \"Creating \" + dirname + lb )\n os.makedirs( dirname )\n\nCUDA_VISIBLE_DEVICES = \"CUDA_VISIBLE_DEVICES\"\n\ndef get_real_gpu_index(n):\n \"\"\"Return the real index for the nth GPU as a string. This respects\n CUDA_VISIBLE_DEVICES\n\n \"\"\"\n cvd = os.environ.get( CUDA_VISIBLE_DEVICES )\n if not cvd: # Treat empty string and None the same\n return str(n)\n # This is an attempt to respect the fact that an invalid index hides\n # the GPUs listed after it\n cvd_parsed = list( itertools.takewhile( lambda i: not i.startswith('-'),\n cvd.split(',') ) )\n if 0 <= n < len( cvd_parsed ):\n return cvd_parsed[n]\n else:\n raise IndexError('Only {} visible GPUs; you asked for number {}!'\n .format( len( cvd_parsed ), n) )\n\ndef execute_command( cmd, stdout=None, stderr=None, gpu=None ):\n if gpu is None:\n env = None\n else:\n env = dict(os.environ)\n env[ CUDA_VISIBLE_DEVICES ] = get_real_gpu_index( gpu )\n return subprocess.call( cmd, stdout=stdout, stderr=stderr, env=env )\n\ndef get_script_path():\n return os.path.dirname( os.path.realpath( sys.argv[0] ) )\n\ndef get_pipeline_cmd( debug=False ):\n if os.name == 'nt':\n if debug:\n return [ 'kwiver.exe', 'runner' ]\n else:\n return [ 'kwiver.exe', 'runner' ]\n else:\n if debug:\n return [ 'gdb', '--args', 'kwiver', 'runner' ]\n else:\n return [ 'kwiver', 'runner' ]\n\ndef exit_with_error( error_str, force=False ):\n log_info( lb1 + 'ERROR: ' + error_str + lb2 )\n # Kill this process to end all threads\n if not isinstance( threading.current_thread(), threading._MainThread ):\n if os.name == 'nt':\n os.kill( os.getpid(), signal.SIGTERM )\n else:\n os.kill( os.getpid(), signal.SIGKILL )\n # Default exit case, if main thread\n sys.exit(0)\n\ndef check_file( filename ):\n if not os.path.exists( filename ):\n exit_with_error( \"Unable to find: \" + filename )\n return filename\n\n@contextlib.contextmanager\ndef get_log_output_files( output_prefix ):\n if os.name == 'nt':\n with open( output_prefix + '.out.txt', 'w' ) as fo, \\\n open( output_prefix + '.err.txt', 'w' ) as fe:\n yield dict( stdout=fo, stderr=fe)\n else:\n with open( output_prefix + '.txt', 'w' ) as fo:\n yield dict( stdout=fo, stderr=fo )\n\ndef find_file( filename ):\n if( os.path.exists( filename ) ):\n return filename\n elif os.path.exists( get_script_path() + div + filename ):\n return get_script_path() + div + filename\n else:\n exit_with_error( \"Unable to find \" + filename )\n\ndef make_filelist_for_dir( input_dir, output_dir, output_name ):\n # The most common extension in the folder is most likely images.\n # Sometimes people have small text files alongside the images\n # so just choose the most common filetype.\n exts = dict()\n files = dict()\n for f in sorted( os.listdir( input_dir ) ):\n f_fp = os.path.join( input_dir, f )\n if os.path.isfile( f_fp ):\n _, ext = os.path.splitext( f )\n if ext in exts:\n exts[ext] += 1\n files[ext].append( f_fp )\n else:\n exts[ext] = 1\n files[ext] = [ f_fp ]\n if len(exts) == 0:\n return \"\"\n\n top_ext = sorted( exts, key=exts.get, reverse=True )[0]\n\n # Write out list to file\n output_file = os.path.join( output_dir, output_name + \".txt\" )\n fout = open( output_file, 
\"w\" )\n for f in files[top_ext]:\n fout.write( os.path.abspath( f + lb1 ) )\n fout.close()\n return output_file\n\n# Other helpers\ndef signal_handler( signal, frame ):\n log_info( lb1 )\n exit_with_error( 'Processing aborted, see you next time' )\n\ndef file_length( filename ):\n if not os.path.exists( filename ):\n exit_with_error( filename + \" does not exist\" )\n with open( filename, 'r' ) as f:\n for i, l in enumerate( f ):\n pass\n return i + 1\n\ndef split_image_list( image_list_file, n, dir ):\n \"\"\"Create and return the paths to n temp files that when appended\n reproduce the original file. The names are created\n deterministically like \"orig_name_part0.ext\", \"orig_name_part1.ext\",\n etc., but with the original name used as is when n == 1.\n\n Existing files with the same names are overwritten without question.\n Deleting the files is the responsibility of the caller.\n\n \"\"\"\n input_basename = os.path.basename( image_list_file )\n if n == 1:\n new_file_names = [ input_basename ]\n else:\n prefix, suffix = os.path.splitext( input_basename )\n num_width = len( str( n - 1 ) )\n new_file_names = [\n prefix + '_part{:0{}}'.format( i, num_width ) + suffix\n for i in range( n )\n ]\n new_file_names = [ os.path.join( dir, fn ) for fn in new_file_names ]\n\n try:\n # Build manually to have the intermediate state in case of error\n temp_files = []\n divisor = math.floor( file_length( image_list_file ) / n ) + 1\n for fn in new_file_names:\n temp_files.append( open( fn, 'w' ) )\n with open( image_list_file ) as f:\n for i, line in enumerate( f ):\n temp_index = int( math.floor( i / divisor ) )\n temp_files[ temp_index ].write( line )\n finally:\n for f in temp_files:\n f.close()\n return new_file_names\n\ndef fset( setting_str ):\n return ['-s', setting_str]\n\ndef video_output_settings_list( options, basename ):\n output_dir = options.output_directory\n\n return list(itertools.chain(\n fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),\n fset( 'track_writer:file_name=' + output_dir + div + basename + track_ext ),\n fset( 'track_writer:stream_identifier=' + basename ),\n fset( 'track_writer_db:writer:db:video_name=' + basename ),\n fset( 'track_writer_kw18:file_name=' + output_dir + div + basename + '.kw18' ),\n fset( 'descriptor_writer_db:writer:db:video_name=' + basename ),\n fset( 'track_descriptor:uid_basename=' + basename ),\n fset( 'kwa_writer:output_directory=' + output_dir ),\n fset( 'kwa_writer:base_filename=' + basename ),\n fset( 'kwa_writer:stream_id=' + basename ),\n ))\n\ndef plot_settings_list( options, basename ):\n output_dir = options.output_directory\n\n return list(itertools.chain(\n fset( 'detector_writer:file_name=' + output_dir + div + basename + detection_ext ),\n fset( 'kwa_writer:output_directory=' + output_dir ),\n fset( 'kwa_writer:base_filename=' + basename ),\n fset( 'kwa_writer:stream_id=' + basename ),\n ))\n\ndef archive_dimension_settings_list( options ):\n if len( options.archive_width ) > 0:\n return list(itertools.chain(\n fset( 'kwa_writer:fixed_col_count=' + options.archive_width ),\n fset( 'kwa_writer:fixed_row_count=' + options.archive_height ),\n ))\n return []\n\ndef object_detector_settings_list( options ):\n if len( options.detection_threshold ) > 0:\n return list( itertools.chain(\n fset( 'detector:detector:darknet:thresh=' + options.detection_threshold ),\n fset( 'detector1:detector:darknet:thresh=' + options.detection_threshold ),\n fset( 'detector2:detector:darknet:thresh=' + 
options.detection_threshold ),\n fset( 'detector_filter:filter:class_probablity_filter:threshold=' + options.detection_threshold ),\n ))\n return []\n\ndef object_tracker_settings_list( options ):\n if len( options.tracker_threshold ) > 0:\n return list( itertools.chain(\n fset( 'track_initializer:track_initializer:threshold:'\n 'filter:class_probablity_filter:threshold=' + options.tracker_threshold ),\n fset( 'tracker:detection_select_threshold=' + options.tracker_threshold ),\n ))\n return []\n\ndef video_frame_rate_settings_list( options ):\n output = []\n if len( options.input_frame_rate ) > 0:\n output += fset( 'input:frame_time=' + str( 1.0 / float( options.input_frame_rate ) ) )\n if len( options.frame_rate ) > 0:\n output += fset( 'downsampler:target_frame_rate=' + options.frame_rate )\n if len( options.batch_size ) > 0:\n output += fset( 'downsampler:burst_frame_count=' + options.batch_size )\n if len( options.batch_skip ) > 0:\n output += fset( 'downsampler:burst_frame_break=' + options.batch_skip )\n return output\n\ndef groundtruth_reader_settings_list( options, gt_files, basename, gpu_id, gt_type ):\n output = []\n if len( gt_files ) == 0:\n exit_with_error( \"Directory \" + basename + \" contains no GT files\" )\n elif len( gt_files ) > 1:\n exit_with_error( \"Directory \" + basename + \" contains multiple GT files\" )\n else:\n if gpu_id > 0:\n output_extension = str( gpu_id ) + '.lbl'\n else:\n output_extension = 'lbl'\n\n lbl_file = options.input_dir + \"/labels.txt\"\n if not os.path.exists( lbl_file ):\n lbl_file = \"labels.txt\"\n\n output += fset( 'detection_reader:file_name=' + gt_files[0] )\n output += fset( 'detection_reader:reader:type=' + gt_type )\n output += fset( 'write_descriptor_ids:category_file=' + lbl_file )\n output += fset( 'write_descriptor_ids:output_directory=' + options.output_directory )\n output += fset( 'write_descriptor_ids:output_extension=' + output_extension )\n return output\n\ndef remove_quotes( input_str ):\n return input_str.replace( \"\\\"\", \"\" )\n\ndef add_final_list_csv( args, video_list ):\n if len( video_list ) == 0:\n return\n for video in video_list:\n if video.endswith( \"_part0.txt\" ):\n output_file = video_list[0].replace( \"_part0.txt\", detection_ext )\n output_stream = open( output_file, \"w\" )\n id_adjustment = 0\n is_first = True\n used_ids = set()\n last_id = 0\n input_stream = open( video.replace( \".txt\", detection_ext ), \"r\" )\n id_mappings = dict()\n for line in input_stream:\n if len( line ) > 0 and ( line[0] == '#' or line[0:9] == 'target_id' ):\n if is_first:\n output_stream.write( line )\n continue\n parsed_line = line.rstrip().split(',')\n if len( parsed_line ) < 2:\n continue\n orig_id = int( parsed_line[0] )\n if orig_id in id_mappings:\n final_id = id_mappings[ orig_id ]\n elif orig_id in used_ids:\n last_id = last_id + 1\n final_id = last_id\n id_mappings[ orig_id ] = final_id\n used_ids.add( final_id )\n else:\n final_id = orig_id\n id_mappings[ orig_id ] = orig_id\n used_ids.add( orig_id )\n last_id = max( last_id, final_id )\n parsed_line[0] = str( final_id )\n parsed_line[2] = str( int( parsed_line[2] ) + id_adjustment )\n output_stream.write( ','.join( parsed_line ) + '\\n' )\n id_adjustment = id_adjustment + file_length( video )\n input_stream.close()\n is_first = False\n\n# Process a single video\ndef process_video_kwiver( input_name, options, is_image_list=False, base_ovrd='',\n cpu=0, gpu=None, write_track_time=True ):\n\n if gpu is None:\n gpu = 0\n\n multi_threaded = ( options.gpu_count * 
options.pipes > 1 )\n auto_detect_gt = ( len( options.auto_detect_gt ) > 0 )\n\n input_basename = os.path.basename( input_name )\n input_ext = os.path.splitext( input_name )[1]\n\n if multi_threaded:\n log_info( 'Processing: {} on GPU {}'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Processing: {} on GPU... '.format( input_basename ) )\n\n # Get video name without extension and full path\n if len( base_ovrd ) > 0:\n basename_no_ext = base_ovrd\n else:\n basename_no_ext = os.path.splitext( input_basename )[0]\n\n # Formulate input setting string\n if auto_detect_gt:\n if options.auto_detect_gt == 'habcam' or 'csv' in options.auto_detect_gt:\n gt_ext = '.csv'\n elif options.auto_detect_gt[0] != '.':\n gt_ext = '.' + options.auto_detect_gt\n else:\n gt_ext = options.auto_detect_gt\n\n if not is_image_list and \\\n ( input_ext == '.csv' or input_ext == '.txt' or input_name == \"__pycache__\" ):\n if multi_threaded:\n log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Skipped' + lb1 )\n return\n elif not os.path.exists( input_name ):\n if multi_threaded:\n log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Skipped' + lb1 )\n return\n elif os.path.isdir( input_name ):\n if auto_detect_gt:\n gt_files = list_files_in_dir_w_ext( input_name, gt_ext )\n input_name = make_filelist_for_dir( input_name, options.output_directory, basename_no_ext )\n if len( input_name ) == 0:\n if multi_threaded:\n log_info( 'Skipped {} on GPU {}'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Skipped' + lb1 )\n return\n is_image_list = True\n elif auto_detect_gt:\n input_path = os.path.dirname( os.path.abspath( input_name ) )\n gt_files = list_files_in_dir_w_ext( input_path, gt_ext )\n\n # Formulate command\n input_settings = fset( 'input:video_filename=' + input_name )\n\n if not is_image_list:\n input_settings += fset( 'input:video_reader:type=vidl_ffmpeg' )\n elif options.ts_from_file:\n input_settings += fset( 'input:video_reader:type=add_timestamp_from_filename' )\n\n command = ( get_pipeline_cmd( options.debug ) +\n [ find_file( options.pipeline ) ] +\n input_settings )\n\n command += video_frame_rate_settings_list( options )\n command += video_output_settings_list( options, basename_no_ext )\n command += archive_dimension_settings_list( options )\n command += object_detector_settings_list( options )\n command += object_tracker_settings_list( options )\n\n if options.write_svm_info and not auto_detect_gt:\n if len( options.input_detections ) == 0:\n exit_with_error( \"Input detections must be specified to write out svm header info\" )\n if not os.path.exists( options.input_detections ):\n exit_with_error( \"Unable to find input detections\" )\n gt_files = [ options.input_detections ]\n if auto_detect_gt or options.write_svm_info:\n gt_type = options.auto_detect_gt if auto_detect_gt else \"viame_csv\"\n command += groundtruth_reader_settings_list( options, gt_files, basename_no_ext, gpu, gt_type )\n\n if write_track_time:\n command += fset( 'track_writer:writer:viame_csv:write_time_as_uid=true' )\n else:\n command += fset( 'track_writer:writer:viame_csv:stream_identifier=' + input_basename )\n\n if len( options.input_detections ) > 0:\n command += fset( \"detection_reader:file_name=\" + options.input_detections )\n\n try:\n if len( options.extra_settings ) > 0:\n for extra_option in options.extra_settings:\n command += fset( \" \".join( extra_option ) )\n except:\n pass\n\n # Process command, possibly with 
logging\n log_base = \"\"\n if len( options.log_directory ) > 0 and not options.debug:\n log_base = options.output_directory + div + options.log_directory + div + basename_no_ext\n with get_log_output_files( log_base ) as kwargs:\n res = execute_command( command, gpu=gpu, **kwargs )\n else:\n res = execute_command( command, gpu=gpu )\n\n global any_video_complete\n\n if res == 0:\n if multi_threaded:\n log_info( 'Completed: {} on GPU {}'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Success' + lb1 )\n any_video_complete = True\n else:\n if multi_threaded:\n log_info( 'Failure: {} on GPU {} Failed'.format( input_basename, gpu ) + lb1 )\n else:\n log_info( 'Failure' + lb1 )\n\n if res == -11:\n s = os.statvfs( options.output_directory )\n\n if s.f_bavail * s.f_frsize < 100000000:\n exit_with_error( lb1 + 'Out of disk space. Clean up space and then re-run.' )\n\n log_info( lb1 + 'Pipeline failed with code 11. This is typically indicative of an '\n 'issue with system resources, e.g. low disk space or running out of '\n 'memory, but could be indicative of a pipeline issue. It\\'s also possible '\n 'the pipeline you are running just had a shutdown issue. Attempting to '\n 'continue processing.' + lb1 )\n\n any_video_complete = True\n\n if not any_video_complete:\n if len( log_base ) > 0:\n exit_with_error( 'Processing failed, check ' + log_base + '.txt, terminating.' )\n else:\n exit_with_error( 'Processing failed, terminating.' )\n elif len( log_base ) > 0:\n log_info( lb1 + 'Check ' + log_base + '.txt for error messages' + lb2 )\n\n# Main Function\nif __name__ == \"__main__\" :\n\n parser = argparse.ArgumentParser(description=\"Process new videos\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"-v\", dest=\"input_video\", default=\"\",\n help=\"Input single video to process\")\n\n parser.add_argument(\"-d\", dest=\"input_dir\", default=\"\",\n help=\"Input directory of videos or image folders to process\")\n\n parser.add_argument(\"-l\", dest=\"input_list\", default=\"\",\n help=\"Input list of image files to process\")\n\n parser.add_argument(\"-p\", dest=\"pipeline\", default=default_pipeline,\n help=\"Input pipeline for processing video or image data\")\n\n parser.add_argument(\"-s\", dest=\"extra_settings\", action='append', nargs='*',\n help=\"Extra command line arguments for the pipeline runner\")\n\n parser.add_argument(\"-id\", dest=\"input_detections\", default=\"\",\n help=\"Input detections around which to create descriptors\")\n\n parser.add_argument(\"-o\", dest=\"output_directory\", default=\".\",\n help=\"Output directory to store files in\")\n\n parser.add_argument(\"-logs\", dest=\"log_directory\", default=\"logs\",\n help=\"Output sub-directory for log files, if empty will not use files\")\n\n parser.add_argument(\"-video-exts\", dest=\"video_exts\", default=\"3qp;3g2;amv;asf;avi;drc;gif;gifv;\"\n \"f4v;f4p;f4a;f4bflv;m4v;mkv;mp4;m4p;m4v;mpg;mpg2;mp2;mpeg;mpe;mpv;mng;mts;\"\n \"m2ts;mov;mxf;nsv;ogg;ogv;qt;roq;rm;rmvb;svi;webm;wmv;vob;yuv\",\n help=\"Allowable video extensions\")\n\n parser.add_argument(\"-image-exts\", dest=\"image_exts\", default=\"bmp;dds;gif;heic;jpg;jpeg;png;psd;\"\n \"psp;pspimage;tga;thm;tif;tiff;yuv\",\n help=\"Allowable image extensions\")\n\n parser.add_argument(\"-frate\", dest=\"frame_rate\", default=\"\",\n help=\"Processing frame rate over-ride to process videos at, specified \"\n \"in hertz (frames per second)\" )\n\n parser.add_argument(\"-fbatch\", dest=\"batch_size\", default=\"\",\n 
help=\"Optional number of frames to process in batches\")\n\n parser.add_argument(\"-fskip\", dest=\"batch_skip\", default=\"\",\n help=\"If batching frames, number of frames to skip between batches\")\n\n parser.add_argument(\"-ifrate\", dest=\"input_frame_rate\", default=\"\",\n help=\"Input frame rate over-ride to process videos at. This is useful \"\n \"for specifying the frame rate of input image lists, which typically \"\n \"don't have frame rates\")\n\n parser.add_argument(\"-detection-threshold\", dest=\"detection_threshold\", default=\"\",\n help=\"Optional detection threshold over-ride parameter\")\n\n parser.add_argument(\"-tracker-threshold\", dest=\"tracker_threshold\", default=\"\",\n help=\"Optional tracking threshold over-ride parameter\")\n\n parser.add_argument(\"-archive-height\", dest=\"archive_height\", default=\"\",\n help=\"Advanced: Optional video archive height over-ride\")\n\n parser.add_argument(\"-archive-width\", dest=\"archive_width\", default=\"\",\n help=\"Advanced: Optional video archive width over-ride\")\n\n parser.add_argument(\"-gpus\", \"--gpu-count\", default=1, type=int, metavar='N',\n help=\"Parallelize the ingest by using the first N GPUs in parallel\")\n\n parser.add_argument(\"-pipes-per-gpu\", \"--pipes\", default=1, type=int, metavar='N',\n help=\"Parallelize the ingest by using the first N GPUs in parallel\")\n\n parser.add_argument(\"--detection-plots\", dest=\"detection_plots\", action=\"store_true\",\n help=\"Produce per-video detection plot summaries\")\n\n parser.add_argument(\"--track-plots\", dest=\"track_plots\", action=\"store_true\",\n help=\"Produce per-video track plot summaries\")\n\n parser.add_argument(\"-plot-objects\", dest=\"objects\", default=\"fish\",\n help=\"Objects to generate plots for\")\n\n parser.add_argument(\"-plot-threshold\", dest=\"plot_threshold\", default=0.25, type=float,\n help=\"Threshold to generate plots for\")\n\n parser.add_argument(\"-plot-smooth\", dest=\"smooth\", default=1, type=int,\n help=\"Smoothing factor for plots\")\n\n parser.add_argument(\"-auto-detect-gt\", dest=\"auto_detect_gt\", default=\"\",\n help=\"Automatically pass to pipes GT of this type if present\")\n\n parser.add_argument(\"--init-db\", dest=\"init_db\", action=\"store_true\",\n help=\"Re-initialize database\")\n\n parser.add_argument(\"--build-index\", dest=\"build_index\", action=\"store_true\",\n help=\"Build searchable index on completion\")\n\n parser.add_argument(\"--ball-tree\", dest=\"ball_tree\", action=\"store_true\",\n help=\"Use a ball tree for the searchable index\")\n\n parser.add_argument(\"--no-reset-prompt\", dest=\"no_reset_prompt\", action=\"store_true\",\n help=\"Don't prompt if the output folder should be reset\")\n\n parser.add_argument(\"--ts-from-file\", dest=\"ts_from_file\", action=\"store_true\",\n help=\"Attempt to retrieve timestamps from image filenames.\")\n\n parser.add_argument(\"--write-svm-info\", dest=\"write_svm_info\", action=\"store_true\",\n help=\"Write out header information used for training SVMs\")\n\n parser.add_argument(\"--debug\", dest=\"debug\", action=\"store_true\",\n help=\"Run with debugger attached to process\")\n\n parser.add_argument(\"-install\", dest=\"install_dir\", default=\"\",\n help=\"Optional install dir over-ride for all application \"\n \"binaries. 
If this is not specified, it is expected that all \"\n \"viame binaries are already in our path.\")\n\n args = parser.parse_args()\n\n # Assorted error checking up front\n process_data = True\n\n number_input_args = sum(len(inp_x) > 0 for inp_x in [args.input_video, args.input_dir, args.input_list])\n\n if number_input_args == 0 or args.pipeline == no_pipeline:\n if not args.build_index and not args.detection_plots and not args.track_plots:\n exit_with_error( \"Either input video or input directory must be specified\" )\n else:\n process_data = False\n\n elif number_input_args > 1:\n exit_with_error( \"Only one of input video, directory, or list should be specified, not more\" )\n\n if ( args.detection_plots or args.track_plots ) and len( args.frame_rate ) == 0:\n exit_with_error( \"Must specify frame rate if generating detection or track plots\" )\n\n signal.signal( signal.SIGINT, signal_handler )\n\n # Initialize database\n if args.init_db:\n if len( args.log_directory ) > 0:\n init_log_file = args.output_directory + div + args.log_directory + div + \"database_log.txt\"\n else:\n init_log_file = \"\"\n db_is_init, user_select = database_tool.init( log_file=init_log_file )\n if not db_is_init:\n if user_select:\n log_info( \"User decided to not initialize new database, shutting down.\" + lb2 )\n sys.exit( 0 )\n elif len( args.log_directory ) > 0:\n exit_with_error( \"Unable to initialize database, check \" + init_log_file + lb2 +\n \"You may have another database running on your system, or ran \"\n \"a failed operation in the past and need to re-log or restart.\" )\n else:\n exit_with_error( \"Unable to initialize database\" )\n log_info( lb1 )\n\n # Call processing pipelines on all input data\n if process_data:\n\n # Handle output directory creation if necessary\n if len( args.output_directory ) > 0:\n recreate_dir = ( not args.init_db and not args.no_reset_prompt )\n prompt_user = ( not args.no_reset_prompt )\n create_dir( args.output_directory, logging=False, recreate=recreate_dir, prompt=prompt_user )\n\n if len( args.log_directory ) > 0:\n create_dir( args.output_directory + div + args.log_directory, logging=False )\n\n # Identify all videos to process\n if len( args.input_list ) > 0:\n if args.gpu_count > 1:\n video_list = split_image_list( args.input_list, args.gpu_count, args.output_directory )\n else:\n video_list = [ args.input_list ]\n is_image_list = True\n elif len( args.input_dir ) > 0:\n video_list = list_videos_in_dir( args.input_dir, args.video_exts )\n is_image_list = False\n else:\n video_list = [ args.input_video ]\n is_image_list = False\n\n if len( video_list ) == 0:\n exit_with_error( \"No videos found for ingest in given folder, exiting.\" )\n elif not is_image_list:\n if not args.init_db:\n log_info( lb1 )\n video_str = \" video\" if len( video_list ) == 1 else \" videos\"\n log_info( \"Processing \" + str( len( video_list ) ) + video_str + lb2 )\n elif not args.build_index:\n log_info( lb1 )\n\n # Get required paths\n pipeline_loc = args.pipeline\n\n # Check for local pipelines and pre-reqs present\n if \"_local.pipe\" in args.pipeline:\n if not os.path.exists( \"category_models/detector.pipe\" ):\n exit_with_error( \"Use of this script requires training a detector first\" )\n\n # Process videos in parallel, one per GPU\n video_queue = queue.Queue()\n for video_name in video_list:\n if os.path.isfile( video_name ) or os.path.isdir( video_name ):\n video_queue.put( video_name )\n else:\n log_info( \"Skipping unknown input: \" + video_name + lb )\n\n def 
process_video_thread( gpu, cpu ):\n while True:\n try:\n video_name = video_queue.get_nowait()\n except queue.Empty:\n break\n process_video_kwiver( video_name, args, is_image_list,\n cpu=cpu, gpu=gpu, write_track_time=not is_image_list )\n\n gpu_thread_list = [ i for i in range( args.gpu_count ) for _ in range( args.pipes ) ]\n cpu_thread_list = list( range( args.pipes ) ) * args.gpu_count\n\n threads = [ threading.Thread( target = process_video_thread, args = (gpu,cpu,) )\n for gpu, cpu in zip( gpu_thread_list, cpu_thread_list ) ]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n if is_image_list:\n if args.gpu_count > 1: # Each thread outputs 1 list, add multiple\n add_final_list_csv( args, video_list )\n for image_list in video_list: # Clean up after split_image_list\n os.unlink( image_list )\n\n if not video_queue.empty():\n exit_with_error( \"Some videos were not processed!\" )\n\n # Build out detection vs time plots for both detections and tracks\n if args.detection_plots:\n import generate_detection_plots\n log_info( lb1 + \"Generating data plots for detections\" )\n detection_plot_dir = os.path.join( args.output_directory, \"detection_plots\" )\n create_dir( detection_plot_dir, logging=False, recreate=True, prompt=False )\n generate_detection_plots.detection_plot( args.output_directory,\n detection_plot_dir, args.objects.split( \",\" ), float( args.plot_threshold ),\n float( args.frame_rate ), int( args.smooth ),\n ext = detection_ext, top_category_only = False )\n\n if args.track_plots:\n import generate_detection_plots\n log_info( lb1 + \"Generating data plots for tracks\" )\n track_plot_dir = os.path.join( args.output_directory, \"track_plots\" )\n create_dir( track_plot_dir, logging=False, recreate=True, prompt=False )\n generate_detection_plots.detection_plot( args.output_directory,\n track_plot_dir, args.objects.split( \",\" ), float( args.plot_threshold ),\n float( args.frame_rate ), int( args.smooth ),\n ext = track_ext, top_category_only = True )\n\n if args.detection_plots or args.track_plots:\n log_info( lb1 )\n\n # Build searchable index\n if args.build_index:\n log_info( lb1 + \"Building searchable index\" + lb2 )\n\n if len( args.log_directory ) > 0:\n index_log_file = args.output_directory + div + args.log_directory + div + \"smqtk_indexer.txt\"\n else:\n index_log_file = \"\"\n\n if args.ball_tree:\n print( \"Warning: building a ball tree is deprecated\" )\n\n if not database_tool.build_standard_index( remove_quotes( args.install_dir ),\n log_file = index_log_file ):\n exit_with_error( \"Unable to build index\" )\n\n # Output complete message\n if os.name == 'nt':\n log_info( lb1 + \"Processing complete, close this window before launching any GUI.\" + lb2 )\n else:\n log_info( lb1 + \"Processing complete\" + lb2 )\n", "sub_path": "tools/process_video.py", "file_name": "process_video.py", "file_ext": "py", "file_size_in_byte": 31317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.dont_write_bytecode", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.islink", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.readlink", "line_number": 50, 
"usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 82, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "database_tool.query_yes_no", "line_number": 90, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 93, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 101, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 110, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 110, "usage_type": "attribute"}, {"api_name": "itertools.takewhile", "line_number": 115, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 127, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 132, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 135, "usage_type": "attribute"}, {"api_name": "threading.current_thread", "line_number": 149, "usage_type": "call"}, {"api_name": "threading._MainThread", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.kill", "line_number": 151, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 151, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.kill", "line_number": 153, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 153, "usage_type": "call"}, {"api_name": "signal.SIGKILL", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 164, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 186, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 247, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 252, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 265, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 281, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 290, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 298, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 404, "usage_type": "call"}, {"api_name": "os.path", "line_number": 404, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 416, "usage_type": "call"}, {"api_name": "os.path", "line_number": 416, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 434, "usage_type": "call"}, {"api_name": "os.path", "line_number": 434, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 440, "usage_type": "call"}, {"api_name": "os.path", "line_number": 440, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 476, "usage_type": "call"}, {"api_name": "os.path", "line_number": 476, "usage_type": "attribute"}, {"api_name": "os.statvfs", "line_number": 522, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 546, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 547, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 678, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 678, "usage_type": "attribute"}, {"api_name": "database_tool.init", "line_number": 686, 
"usage_type": "call"}, {"api_name": "sys.exit", "line_number": 690, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 740, "usage_type": "call"}, {"api_name": "os.path", "line_number": 740, "usage_type": "attribute"}, {"api_name": "Queue.Queue", "line_number": 744, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 746, "usage_type": "call"}, {"api_name": "os.path", "line_number": 746, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 746, "usage_type": "call"}, {"api_name": "Queue.Empty", "line_number": 755, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 763, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 775, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 784, "usage_type": "call"}, {"api_name": "os.path", "line_number": 784, "usage_type": "attribute"}, {"api_name": "generate_detection_plots.detection_plot", "line_number": 786, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 794, "usage_type": "call"}, {"api_name": "os.path", "line_number": 794, "usage_type": "attribute"}, {"api_name": "generate_detection_plots.detection_plot", "line_number": 796, "usage_type": "call"}, {"api_name": "database_tool.build_standard_index", "line_number": 816, "usage_type": "call"}, {"api_name": "os.name", "line_number": 821, "usage_type": "attribute"}]} +{"seq_id": "283681150", "text": "import scrapy\nfrom crawlerbot.items import TgItem\nimport json\nimport re\nimport os\nfrom datetime import datetime, timedelta\n\n\nclass tgrentSpider(scrapy.Spider):\n name = 'tgrentspider'\n output_name = 'tgrent'\n\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'crawlerbot.pipelines.JsonPipeline': 300,\n 'crawlerbot.pipelines.MongoPipeline': 400\n }\n # 'LOG_FILE': 'crawlerbot/logs/demospider.log',\n # 'LOG_LEVEL': 'WARNING'\n }\n curDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n dirName = os.path.join(curDir, 'json')\n try:\n with open(os.path.join(dirName, 'tglinkrent.json'), 'r') as f:\n data = json.load(f)\n urls = [d['link'] for d in data]\n start_urls = urls\n # start_urls = urls[:10]\n except FileNotFoundError:\n pass\n\n def parse(self, response):\n item = TgItem()\n item['pid'] = response.xpath('//div[@class=\"unit_id\"]/strong/text()').extract_first()\n item['name'] = response.xpath('//h2[@class=\"project_title\"]/a/text()').extract_first()\n if item['name'] is None:\n item['name'] = response.xpath('//h2[@class=\"project_title\"]/text()').extract_first()\n item['location'] = response.xpath('//ul[@class=\"basic-list\"]/li/strong/text()').extract()[1].split(',')[0].strip()\n item['ptype'] = response.xpath('//ul[@class=\"basic-list\"]/li/strong/text()').extract()[0]\n item['size'] = response.xpath('//div[@class=\"txt\"]/strong/text()').extract()[2].split()[0]\n if item['ptype'] == 'Condo':\n item['floor'] = response.xpath('//ul[@class=\"property-list\"]/li/img[contains(@class,\"floor_logo\")]/../div/text()').extract_first().strip()\n else:\n item['floor'] = response.xpath('//ul[@class=\"basic-list\"]/li/span[contains(text(),\"Number of Floors in Building\")]/../strong/text()').extract_first().strip()\n complete_year = response.xpath('//ul[@class=\"basic-list\"]/li/strong[contains(text(),\"Completed\")]/text()').extract_first()\n if complete_year is None: \n item[\"yearbuilt\"] = \"Offplan\"\n else:\n yearbuilt = complete_year.split()\n if len(yearbuilt) == 3:\n item['yearbuilt'] = yearbuilt[1][1:] + ' ' + yearbuilt[2][:-1]\n else:\n item['yearbuilt'] = 
\"Completed\"\n item['price'] = response.xpath('//span[@class=\"unit_price\"]/text()').extract_first()[1:]\n if item['price'] is not None:\n item['price'] = item['price'].replace(',','')\n item['bed'] = response.xpath('//div[@class=\"txt\"]/strong/text()').extract_first()\n item['bath'] = response.xpath('//div[@class=\"txt\"]/strong/text()').extract()[1]\n item['furniture'] = response.xpath('//ul[@class=\"basic-list\"]/li/span[contains(text(),\"Furniture\")]/../strong/text()').extract_first().strip()\n daypost = response.xpath('//li[@class=\"hidden-sm hidden-xs\"]/text()').extract_first().split()[0]\n item['daypost'] = datetime.utcnow() - timedelta(days=int(daypost))\n # Map\n map_url = \"https://property.thethaiger.com/get-map/unit/\" + item['pid']\n request = scrapy.Request(map_url, callback=self.parse_latlng)\n request.meta['item'] = item\n return request\n \n def parse_latlng(self, response):\n item = response.meta['item']\n lat = response.xpath('//input[@class=\"centerLat\"]/@value').extract_first()\n lng = response.xpath('//input[@class=\"centerLng\"]/@value').extract_first()\n item['latlng'] = [lat, lng]\n return item\n \n", "sub_path": "crawlerbot/spiders/tgrent.py", "file_name": "tgrent.py", "file_ext": "py", "file_size_in_byte": 3618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "scrapy.Spider", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "crawlerbot.items.TgItem", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "291148682", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 29 17:31:56 2019\n\n@author: RGOUSSAULT\n\"\"\"\nimport utils\nimport constants\nimport keras\n\nclass Node:\n def __init__(self, x_train, x_test, y_train, y_test):\n self.x_train = x_train\n self.x_val = []\n self.x_test = x_test\n \n self.y_train = y_train\n self.y_val = []\n self.y_test = y_test\n\n\n def get_x_train_len(self):\n return len(self.x_train)\n\n\n def preprocess_data(self):\n self.x_train = utils.preprocess_input(self.x_train)\n self.x_test = utils.preprocess_input(self.x_test)\n\n # Preprocess labels (y) data\n self.y_train = keras.utils.to_categorical(self.y_train, constants.NUM_CLASSES)\n self.y_test = keras.utils.to_categorical(self.y_test, constants.NUM_CLASSES)\n", "sub_path": "node.py", "file_name": "node.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "utils.preprocess_input", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.preprocess_input", "line_number": 28, "usage_type": 
"call"}, {"api_name": "keras.utils.to_categorical", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "constants.NUM_CLASSES", "line_number": 31, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 32, "usage_type": "attribute"}, {"api_name": "constants.NUM_CLASSES", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "84505226", "text": "import asyncio\nimport random\nfrom typing import Dict\n\nfrom readers import CollectionReader, NodeReader\n\n\nclass DataMuxProtocol:\n def __init__(\n self,\n LIST_LIVE_STREAMS=b\"list_live_streams\",\n RELAY_LIVE_STREAM=b\"relay_live_stream\",\n LIST_COLLECTIONS=b\"list_collections\",\n LIST_COLLECTION_STREAMS=b\"list_collection_streams\",\n REPLAY_COLLECTION_STREAM=b\"replay_collection_stream\",\n RESTREAM_COLLECTION_STREAM=b\"restream_collection_stream\",\n ):\n self.LIST_LIVE_STREAMS = LIST_LIVE_STREAMS\n self.RELAY_LIVE_STREAM = RELAY_LIVE_STREAM\n self.LIST_COLLECTIONS = LIST_COLLECTIONS\n self.LIST_COLLECTION_STREAMS = LIST_COLLECTION_STREAMS\n self.REPLAY_COLLECTION_STREAM = REPLAY_COLLECTION_STREAM\n self.RESTREAM_COLLECTION_STREAM = RESTREAM_COLLECTION_STREAM\n\n\nclass DataMuxServer:\n \"\"\"\n Main Class for DataMux Functionality\n\n It provides three modes of execution.\n\n * **Proxy** mode: proxy live LSL data streams on the local network\n * **Replay** mode: replay datasets from storage to mimic a live data stream\n * **Simulate** mode: simulate random/guided data as a data stream\n\n \"\"\"\n\n def __init__(\n self,\n protocol: DataMuxProtocol,\n ) -> None:\n self.reader_n = NodeReader()\n self.reader_c = CollectionReader()\n self.queue = asyncio.Queue()\n self.protocol = protocol\n\n def __gen_randseq(self, length: int = 5):\n options = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n return ''.join(random.choice(options) for x in range(length))\n\n def list_live_streams(\n self,\n ):\n \"\"\"\n List all live streams\n\n \"\"\"\n self.reader_n.refresh_streams()\n streams = self.reader_n.list_streams()\n return dict(\n streams=[s.model_dump() for s in streams],\n )\n\n def relay_live_streams(\n self,\n stream_name: str,\n attrs: dict,\n ):\n \"\"\"\n Relay data from a live stream\n\n \"\"\"\n randseq = self.__gen_randseq()\n task = self.reader_n.relay(stream_name, attrs, randseq, self.queue)\n status = 0 if task.cancelled() else 1\n return dict(\n stream_name=stream_name,\n status=status,\n randseq=randseq,\n )\n\n def list_collections(\n self,\n ):\n \"\"\"\n List all collections\n\n \"\"\"\n self.reader_c.refresh_collections()\n collections = self.reader_c.list_collections()\n return dict(\n collections=[c.model_dump() for c in collections],\n )\n\n def list_collection_streams(\n self,\n collection_name: str,\n ):\n \"\"\"\n List all streams in a collection\n\n \"\"\"\n streams = self.reader_c.list_streams(collection_name)\n return dict(\n collection_name=collection_name,\n streams=[s.model_dump() for s in streams],\n )\n\n def replay_collection_stream(\n self,\n collection_name: str,\n stream_name: str,\n attrs: dict,\n ):\n \"\"\"\n Replay one stream in a collection directly.\n\n \"\"\"\n randseq = self.__gen_randseq()\n task = self.reader_c.replay(collection_name, stream_name, attrs, randseq, self.queue)\n status = 0 if task.cancelled() else 1\n return dict(\n collection_name=collection_name,\n stream_name=stream_name,\n 
attrs=attrs,\n status=status,\n randseq=randseq,\n )\n\n def restream_collection_stream(\n self,\n collection_name: str,\n stream_name: str,\n attrs: dict,\n ):\n \"\"\"\n Replay one stream in a collection via LSL\n\n \"\"\"\n randseq = self.__gen_randseq()\n task = self.reader_c.restream(collection_name, stream_name, attrs, randseq)\n status = 0 if task.cancelled() else 1\n return dict(\n collection_name=collection_name,\n stream_name=stream_name,\n attrs=attrs,\n status=status,\n randseq=randseq,\n )\n\n def process(\n self,\n topic: bytes,\n **content,\n ):\n \"\"\"\n Handle Requests sent to DataMuxServer\n\n Args:\n topic (bytes): topic of the message\n content (dict): content of the message\n \"\"\"\n\n # LIVE MODE (LSL -> Queue) =================================================================================================\n if topic == self.protocol.LIST_LIVE_STREAMS:\n retval = self.list_live_streams()\n elif topic == self.protocol.RELAY_LIVE_STREAM:\n retval = self.relay_live_streams(\n content[\"stream_name\"],\n content[\"attrs\"],\n )\n # REPLAY MODE (File -> Queue) ==============================================================================================\n elif topic == self.protocol.LIST_COLLECTIONS:\n retval = self.list_collections()\n elif topic == self.protocol.LIST_COLLECTION_STREAMS:\n retval = self.list_collection_streams(\n content[\"collection_name\"],\n )\n elif topic == self.protocol.REPLAY_COLLECTION_STREAM:\n retval = self.replay_collection_stream(\n content[\"collection_name\"],\n content[\"stream_name\"],\n content[\"attrs\"],\n )\n # RESTREAM MODE (File -> LSL) ==============================================================================================\n elif topic == self.protocol.RESTREAM_COLLECTION_STREAM:\n collection_name = content[\"collection_name\"]\n stream_name = content[\"stream_name\"]\n attrs = content[\"attrs\"]\n retval = self.restream_collection_stream(\n collection_name,\n stream_name,\n attrs,\n )\n # FALLBACK =================================================================================================================\n else:\n retval = dict(error=\"Unknown Request\")\n\n return self.queue.put((topic, retval))\n\n def deque(\n self,\n ):\n \"\"\"\n Get the next pending message, if any\n\n \"\"\"\n return self.queue.get()\n\n\nclass DataMuxClient:\n \"\"\"\n Client to generate requests under the DataMux protocol\n\n \"\"\"\n\n def __init__(\n self,\n protocol: DataMuxProtocol,\n ) -> None:\n self.protocol = protocol\n\n def list_live_streams(\n self,\n ):\n \"\"\"\n List all live streams\n\n \"\"\"\n topic = self.protocol.LIST_LIVE_STREAMS\n content: Dict[str, str] = {}\n return topic, content\n\n def relay_live_streams(\n self,\n stream_name: str,\n attrs: dict,\n ):\n \"\"\"\n Relay data from a live stream\n\n \"\"\"\n topic = self.protocol.RELAY_LIVE_STREAM\n content = dict(stream_name=stream_name, attrs=attrs)\n return topic, content\n\n def list_collections(\n self,\n ):\n \"\"\"\n List all collections\n\n \"\"\"\n topic = self.protocol.LIST_COLLECTIONS\n content: Dict[str, str] = {}\n return topic, content\n\n def list_collection_streams(\n self,\n collection_name: str,\n ):\n \"\"\"\n List all streams in a collection\n\n \"\"\"\n topic = self.protocol.LIST_COLLECTION_STREAMS\n content = dict(collection_name=collection_name)\n return topic, content\n\n def replay_collection_stream(\n self,\n collection_name: str,\n stream_name: str,\n attrs: Dict[str, str],\n ):\n \"\"\"\n Replay one stream in a collection directly.\n\n 
\"\"\"\n topic = self.protocol.REPLAY_COLLECTION_STREAM\n content = dict(\n collection_name=collection_name,\n stream_name=stream_name,\n attrs=attrs,\n )\n return topic, content\n\n def restream_collection_stream(\n self,\n collection_name: str,\n stream_name: str,\n attrs: Dict[str, str],\n ):\n \"\"\"\n Replay one stream in a collection via LSL\n\n \"\"\"\n topic = self.protocol.RESTREAM_COLLECTION_STREAM\n content = dict(\n collection_name=collection_name,\n stream_name=stream_name,\n attrs=attrs,\n )\n return topic, content\n", "sub_path": "datamux/datamux.py", "file_name": "datamux.py", "file_ext": "py", "file_size_in_byte": 8487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "readers.NodeReader", "line_number": 42, "usage_type": "call"}, {"api_name": "readers.CollectionReader", "line_number": 43, "usage_type": "call"}, {"api_name": "asyncio.Queue", "line_number": 44, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 49, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 231, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 255, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 292, "usage_type": "name"}]} +{"seq_id": "517224697", "text": "from math import floor, log2\r\nfrom operator import xor\r\nfrom typing import List, Tuple\r\n\r\n# cells.length == 8\r\n# 1 <= N <= 10^9\r\n# 如果一间牢房的两个相邻的房间都被占用或都是空的,那么该牢房就会被占用。 => 即 left^right^1\r\n# 否则,它就会被空置。\r\n\r\n# 答案:模拟每一天监狱的状态。\r\n# 注意loop从第一天开始,最好是先算出第一天\r\n# https://leetcode.com/problems/prison-cells-after-n-days/discuss/591304/Simple-Python-Solution\r\n\r\n# n要与状态同时更新\r\n\r\n# !哈希表记录周期(鸽巢原理)\r\n\r\n\r\nclass Solution:\r\n def prisonAfterNDays(self, cells: List[int], n: int) -> List[int]:\r\n def move(preState: List[int]):\r\n return [int(i > 0 and i < 7 and preState[i - 1] == preState[i + 1]) for i in range(8)]\r\n\r\n visited = dict()\r\n while n:\r\n visited[tuple(cells)] = n # !保存当前状态\r\n cells = move(cells) # !线性转移\r\n n -= 1\r\n\r\n if tuple(cells) in visited: # !寻找周期加速\r\n period = visited[tuple(cells)] - n\r\n\r\n n %= period\r\n\r\n return cells\r\n\r\n\r\n# print(Solution().prisonAfterNDays(cells=[0, 1, 0, 1, 1, 0, 0, 1], n=7))\r\nprint(Solution().prisonAfterNDays(cells=[0, 1, 0, 1, 1, 0, 0, 1], n=7))\r\n# 输出:[0,0,1,1,0,0,0,0]\r\n# 解释:\r\n# 下表概述了监狱每天的状况:\r\n# Day 0: [0, 1, 0, 1, 1, 0, 0, 1]\r\n# Day 1: [0, 1, 1, 0, 0, 0, 0, 0]\r\n# Day 2: [0, 0, 0, 0, 1, 1, 1, 0]\r\n# Day 3: [0, 1, 1, 0, 0, 1, 0, 0]\r\n# Day 4: [0, 0, 0, 0, 0, 1, 0, 0]\r\n# Day 5: [0, 1, 1, 1, 0, 1, 0, 0]\r\n# Day 6: [0, 0, 1, 0, 1, 1, 0, 0]\r\n# Day 7: [0, 0, 1, 1, 0, 0, 0, 0]\r\n", "sub_path": "22_专题/倍增与周期性/957. N 天后的牢房-哈希表找周期.py", "file_name": "957. N 天后的牢房-哈希表找周期.py", "file_ext": "py", "file_size_in_byte": 1674, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "433794982", "text": "\"\"\"\nSuper-resolution of CelebA using Generative Adversarial Networks.\n\nThe dataset can be downloaded from: https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip?dl=0\n\nInstrustion on running the script:\n1. Download the dataset from the provided link\n2. Save the folder 'img_align_celeba' to 'datasets/'\n4. 
Run the sript using command 'python srgan.py'\n\"\"\"\n\nfrom __future__ import print_function, division\nimport scipy\n\nfrom keras.datasets import mnist\nfrom keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\nfrom keras import layers\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, AveragePooling2D\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add, Lambda, Subtract\nfrom keras.layers.advanced_activations import PReLU, LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.applications import VGG19\nfrom keras.models import Sequential, Model, load_model\nfrom keras.optimizers import Adam\nimport datetime\nimport matplotlib.pyplot as plt\nimport sys\nfrom kes_data_loader import DataLoader\nimport numpy as np\n\nfrom PIL import Image\nimport os\n\nimport math\nimport scipy\n\nimport tensorflow as tf\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\nfrom keras.backend import tensorflow_backend as K\n# config = tf.compat.v1.ConfigProto() #tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# K.set_session(tf.compat.v1.Session(config=config))#tf.Session\n# K.set_learning_phase(1)\n\nold_v = tf.compat.v1.logging.get_verbosity()\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n\nclass SRGAN():\n def __init__(self):\n # Input shape\n self.scale_factor = 2 # 2x\n self.channels = 3\n self.hr_height = 256 # High resolution height\n self.hr_width = 256 # High resolution width\n self.hr_shape = (self.hr_height, self.hr_width, self.channels)\n self.lr_height = int(self.hr_height / self.scale_factor) # Low resolution height\n self.lr_width = int(self.hr_width / self.scale_factor) # Low resolution width\n self.lr_shape = (self.lr_height, self.lr_width, self.channels)\n\n ## PARAMETERS\n # Number of residual blocks in the generator\n self.dataset_name = 'Linnaeus5'\n self.model_name = \"210205_Cycle_Lanczos_e3,1,e1\"\n self.n_residual_blocks = 8\n self.Epochs = 0\n self.d_pair = 0 # 0 : hr, 1 : lr\n self.f_pair = 0 # 0 : hr, 1 : lr\n self.p_pair = 0 # 0 : hr, 1 : lr\n self.weights = [1e-3, 1, 1e-1]\n self.interpolation = Image.LANCZOS #compare\n self.up_interpolation = Image.BILINEAR #up\n\n self.interpolation_str = 'Nearest'\n if self.interpolation == Image.BILINEAR:\n self.interpolation_str = 'Bilinear'\n elif self.interpolation == Image.BICUBIC:\n self.interpolation_str = 'Bicubic'\n elif self.interpolation == Image.HAMMING:\n self.interpolation_str = 'Hamming'\n elif self.interpolation == Image.LANCZOS:\n self.interpolation_str = 'Lanczos'\n\n self.up_interpolation_str = 'Nearest'\n if self.up_interpolation == Image.BILINEAR:\n self.up_interpolation_str = 'Bilinear'\n elif self.up_interpolation == Image.BICUBIC:\n self.up_interpolation_str = 'Bicubic'\n elif self.up_interpolation == Image.HAMMING:\n self.up_interpolation_str= 'Hamming'\n elif self.up_interpolation == Image.LANCZOS:\n self.up_interpolation_str = 'Lanczos'\n\n optimizer = Adam(0.0002, 0.5)\n\n # We use a pre-trained VGG19 model to extract image features from the high resolution\n # 
and the generated high resolution images and minimize the mse between them\n self.vgg = self.build_vgg()\n self.vgg.trainable = False\n self.vgg.compile(loss='mse',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n\n self.vgg_lr = self.build_vgg_lr()\n self.vgg_lr.trainable = False\n self.vgg_lr.compile(loss='mse',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Configure data loader\n self.data_loader = DataLoader(dataset_name=self.dataset_name,\n img_res=(self.hr_height, self.hr_width),\n low_img_res=(self.lr_height, self.lr_width),\n interpolation=self.interpolation,\n up_interpolation=self.up_interpolation)\n self.total_sample = self.data_loader.get_total()\n\n # Calculate output shape of D (PatchGAN)\n patch = int(self.hr_height / 2**4)\n self.disc_patch = (patch, patch, 1)\n\n # Number of filters in the first layer of G and D\n self.gf = 64\n self.df = 64\n\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n if self.Epochs != 0:\n self.discriminator = load_model(\"./saved_model/{}/Discriminator_{}.h5\".format(self.model_name, self.Epochs))\n self.discriminator.compile(loss='mse',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Build the generator\n self.generator = self.build_generator() # build_generator\n if self.Epochs != 0:\n self.generator = load_model(\"./saved_model/{}/Generator_{}.h5\".format(self.model_name, self.Epochs))\n\n # High res. and low res. images\n img_hr = Input(shape=self.hr_shape)\n img_lr = Input(shape=self.lr_shape) # not lr_shape\n\n # Generate high res. version from low res.\n fake_lr, fake_hr = self.generator(img_hr)\n\n # Extract image features of the generated img\n fake_features = self.vgg(fake_hr)\n fake_lr2, fake_hr2 = self.generator(fake_hr)\n fake_features2 = self.vgg(fake_hr2)\n fake_lr_features = self.vgg_lr(fake_lr)\n\n # For the combined model we will only train the generator\n #self.discriminator.trainable = False\n self.discriminator_frozen = Model(self.discriminator.input, self.discriminator.output)\n self.discriminator_frozen.trainable = False\n\n # Discriminator determines validity of generated high res. images\n validity = self.discriminator_frozen(fake_hr)\n fake_validity = self.discriminator_frozen(fake_hr)\n lr_validity = self.discriminator_frozen(img_lr)\n #validity = Subtract()([fake_validity, lr_validity])\n\n self.combined = Model([img_hr, img_lr], [validity, fake_features, fake_features2])#fake_hr])\n self.combined.compile(loss=['binary_crossentropy', 'mse', 'mse'],\n loss_weights=self.weights, #1e-3, 1, 1\n optimizer=optimizer)\n\n def build_vgg(self):\n \"\"\"\n Builds a pre-trained VGG19 model that outputs image features extracted at the\n third block of the model\n \"\"\"\n vgg = VGG19(weights=\"imagenet\")\n # Set outputs to outputs of last conv. layer in block 3\n # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py\n vgg.outputs = [vgg.layers[9].output]\n\n img = Input(shape=(self.hr_width, self.hr_height, 3))\n\n # Extract image features\n img_features = vgg(img)\n\n model = Model(inputs=[img], outputs=[img_features], name='vgg')\n return model\n\n def build_vgg_lr(self):\n \"\"\"\n Builds a pre-trained VGG19 model that outputs image features extracted at the\n third block of the model\n \"\"\"\n vgg = VGG19(weights=\"imagenet\")\n # Set outputs to outputs of last conv. 
layer in block 3\n # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py\n vgg.outputs = [vgg.layers[9].output]\n\n img = Input(shape=(self.lr_width, self.lr_height, 3))\n\n # Extract image features\n img_features = vgg(img)\n\n model = Model(inputs=[img], outputs=[img_features], name='vgg_lr')\n return model\n\n #return Model(img, img_features)\n\n # def build_generator(self):\n #\n # def residual_block(layer_input, filters):\n # \"\"\"Residual block described in paper\"\"\"\n # d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n # d = Activation('relu')(d)\n # d = BatchNormalization(momentum=0.8)(d)\n # d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n # d = BatchNormalization(momentum=0.8)(d)\n # d = Add()([d, layer_input])\n # return d\n #\n # def d_block(layer_input, filters, strides=1, bn=True):\n # \"\"\"Discriminator layer\"\"\"\n # d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n # d = Activation('relu')(d)\n # #d = LeakyReLU(alpha=0.2)(d)\n # if bn:\n # d = BatchNormalization(momentum=0.8)(d)\n # return d\n # # Low resolution image input\n # img_hr = Input(shape=self.hr_shape)\n #\n # # Pre-residual block\n # c1 = Conv2D(self.gf, kernel_size=9, strides=1, padding='same')(img_hr)\n # c1 = Activation('relu')(c1)\n # c2 = c1\n # for i in range(int(math.log2(self.scale_factor))):\n # c2 = d_block(c2, self.gf*2, strides=2)\n # c2 = d_block(c2, self.gf*2, strides=1)\n #\n # # Propogate through residual blocks\n # r = residual_block(c2, self.gf)\n # for _ in range(self.n_residual_blocks - 1):\n # r = residual_block(r, self.gf)\n #\n # # Post-residual block\n # c3 = Conv2D(self.gf, kernel_size=3, strides=1, padding='same')(r)\n # c3 = BatchNormalization(momentum=0.8)(c3)\n # c3 = Add()([c3, c2])\n # c3 = Conv2D(self.gf * self.scale_factor *2, kernel_size=3, strides=1, padding='same')(c3) ###\n # #c3 = BatchNormalization(momentum=0.8)(c3)\n #\n # # Generate high resolution output\n # u1 = Conv2D(self.channels, kernel_size=9, strides=1, padding='same', activation='tanh')(c3)\n #\n # # Upsampling\n # gen_lr = UpSampling2D(size=self.scale_factor)(u1)\n #\n # model = Model(img_hr, gen_lr)\n # model.summary()\n # return model\n\n def build_generator(self):\n def DownScale(input):\n width = (input.shape[1]//2)\n height = (input.shape[2]//2)\n return tf.image.resize(input, (width, height), method='bilinear')\n def UpScale(input):\n width = (input.shape[1]*2)\n height = (input.shape[2]*2)\n return tf.image.resize(input, (width, height), method='bilinear')\n def Cl(input):\n return tf.keras.backend.clip(input, -1, 1)\n\n def residual_block(layer_input, filters):\n \"\"\"Residual block described in paper\"\"\"\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)\n d = Activation('relu')(d)\n #d = BatchNormalization(momentum=0.8)(d)\n d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)\n #d = BatchNormalization(momentum=0.8)(d)\n d = Add()([d, layer_input])\n return d\n\n def d_block(layer_input, filters, strides=1, bn=False):\n \"\"\"Discriminator layer\"\"\"\n d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n d = Activation('relu')(d)\n #d = LeakyReLU(alpha=0.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d\n # Low resolution image input\n img_hr = Input(shape=self.hr_shape)\n filter_hr = Conv2D(3, kernel_size=3, strides=1, padding='same')(img_hr)\n filter_hr = Conv2D(3, kernel_size=3, 
strides=1, padding='same')(filter_hr)\n filter_hr = Conv2D(3, kernel_size=3, strides=1, padding='same')(filter_hr)\n filter_hr = Activation('tanh')(filter_hr)\n\n img_lr = Lambda(DownScale)(filter_hr)\n #img_lr = AveragePooling2D(strides=4, pool_size=(4, 4), padding='same')(img_hr)\n # Pre-residual block\n c1 = Conv2D(self.gf, kernel_size=3, strides=2, padding='same')(img_hr)\n #c1 = Conv2D(self.gf, kernel_size=4, strides=2, padding='same')(c1)\n c1 = Activation('relu')(c1)\n c2 = c1\n # for i in range(int(math.log2(self.scale_factor))):\n # c2 = d_block(c2, self.gf*2, strides=2)\n # c2 = d_block(c2, self.gf*2, strides=1)\n\n # Propogate through residual blocks\n # r = residual_block(c2, self.gf)\n # for _ in range(self.n_residual_blocks - 1):\n # r = residual_block(r, self.gf)\n for _ in range(self.n_residual_blocks):\n c2 = d_block(c2, self.gf, strides=1)\n # Post-residual block\n c3 = Conv2D(self.gf, kernel_size=3, strides=1, padding='same')(c2)\n #c3 = BatchNormalization(momentum=0.8)(c3)\n #c3 = Conv2D(self.gf * self.scale_factor, kernel_size=3, strides=1, padding='same')(c3) ###\n #c3 = BatchNormalization(momentum=0.8)(c3)\n\n # Generate high resolution output\n c3 = Conv2D(self.channels, kernel_size=3, strides=1, padding='same')(c3)#, activation='tanh')(c3)\n u1 = Add()([c3, img_lr])\n\n #lambda_clip = Lambda(lambda x: tf.keras.backend.clip(x, -1, 1))\n #u1 = lambda_clip(u1)\n #u1 = Clip(name='Clip_1')(u1)\n #u1 = Clip(name='Clip_2')(u1)\n #u1 = Lambda(Cl)(u1)\n u1 = Activation('tanh')(u1)\n #u1 = Conv2D(self.channels, kernel_size=3, strides=1, padding='same', activation='tanh')(u1)\n # Upsampling\n #gen_lr = UpSampling2D(size=self.scale_factor)(u1)\n gen_lr = Lambda(UpScale)(u1)\n\n model = Model(inputs=img_hr, outputs=[u1, gen_lr])\n model.summary()\n return model\n\n def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n \"\"\"Discriminator layer\"\"\"\n d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n d = LeakyReLU(alpha=0.2)(d)\n if bn:\n d = BatchNormalization(momentum=0.8)(d)\n return d\n\n # Input img\n d0 = Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df*2)\n d4 = d_block(d3, self.df*2, strides=2)\n d5 = d_block(d4, self.df*4)\n d6 = d_block(d5, self.df*4, strides=2)\n d7 = d_block(d6, self.df*8)\n d8 = d_block(d7, self.df*8, strides=2)\n\n d9 = Dense(self.df*16)(d8)\n d10 = LeakyReLU(alpha=0.2)(d9)\n validity = Dense(1, activation='sigmoid')(d10)\n\n model = Model(d0, validity)\n model.summary()\n return model\n\n def train(self, epochs, batch_size=1, sample_interval=50):\n print(\"train start\")\n start_time = datetime.datetime.now()\n #minibatch = self.data_loader.get_batch(batch_size)\n\n t_epoch = []\n v_epoch = []\n t_D_Loss = []\n t_G_Loss = []\n t_D_Acc = []\n v_D_Acc = []\n v_D_Loss = []\n v_G_Loss = []\n D_Loss = 0.\n D_Acc = 0.\n G_Loss = 0.\n best_test_loss = 999\n for epoch in range(epochs):\n for batch_i, (imgs_hr, imgs_lr) in enumerate(self.data_loader.load_batch(batch_size)):\n valid = np.ones((batch_size,) + self.disc_patch)\n fake = np.zeros((batch_size,) + self.disc_patch)\n #for batch_i in range(minibatch):\n\n # ----------------------\n # Train Discriminator\n # ----------------------\n\n # From low res. image generate high res. 
version\n fake_lr, fake_hr = self.generator.predict(imgs_hr)\n\n # Train the discriminators (original images = real / generated = Fake)\n d_loss_real = []\n if self.d_pair == 0:\n d_loss_real = self.discriminator.train_on_batch(imgs_hr, valid)\n if self.d_pair == 1:\n d_loss_real = self.discriminator.train_on_batch(imgs_lr, valid)\n d_loss_fake = self.discriminator.train_on_batch(fake_hr, fake)\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n\n # ------------------\n # Train Generator\n # ------------------\n\n # Extract ground truth image features using pre-trained VGG19 model\n image_features = []\n image_features_lr = []\n if self.f_pair == 0:\n image_features = self.vgg.predict(imgs_hr)\n image_features_lr = self.vgg_lr.predict(imgs_lr)\n if self.f_pair == 1:\n image_features = self.vgg.predict(imgs_lr)\n # Train the generators\n g_loss = []\n if self.p_pair == 0:\n g_loss = self.combined.train_on_batch([imgs_hr, imgs_lr], [valid, image_features, image_features])\n if self.p_pair == 1:\n g_loss = self.combined.train_on_batch([imgs_hr, imgs_lr], [valid, image_features, imgs_lr])\n # Plot the progress\n elapsed_time = datetime.datetime.now() - start_time\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f] time: %s \" \\\n % (epoch, epochs,\n batch_i, self.data_loader.n_batches,\n d_loss[0], 100 * d_loss[1], g_loss[0],\n elapsed_time))\n\n g_loss_val = g_loss[0]\n if(g_loss_val < best_test_loss):\n best_test_loss = g_loss_val\n print(\"Best Model Epoch:%d, batch_i:%d, G loss: %f\" % (epoch, batch_i, g_loss_val))\n self.generator.save(\"best_%s.h5\" % self.model_name)\n self.test(epoch, batch_i)\n #self.generator = load_model(\"best_%s.h5\" % self.model_name, custom_objects={'tf': tf}) #name 'tf' is not defined\n #tf.keras.models.save_model(self.generator, \"best_%s.h5\" % self.model_name)\n # D_Loss += d_loss[0] * 100\n # D_Acc += d_loss[1] * 100\n # G_Loss += g_loss[0] * self.weights[0] + g_loss[1] * self.weights[1] + g_loss[2] * self.weights[2]\n\n if batch_i % sample_interval == 0 and batch_i != 0:\n self.sample_images(epoch, batch_i)\n self.sample_images_save(epoch, batch_i)\n\n # if batch_i == (self.total_sample / batch_size) - 1:\n # new_imgs_hr, new_imgs_lr = self.data_loader.load_data(batch_size)\n # fake_lr = self.generator.predict(new_imgs_hr)\n # d_loss_real = self.discriminator.test_on_batch(new_imgs_hr, valid)\n # d_loss_fake = self.discriminator.test_on_batch(fake_lr, fake)\n # d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n # new_image_features = self.vgg.predict(new_imgs_hr)\n # if self.f_pair == 1:\n # new_image_features = self.vgg.predict(new_imgs_lr)\n # g_loss = self.combined.test_on_batch([new_imgs_hr, new_imgs_lr], [valid, new_image_features, new_imgs_hr])\n # if self.p_pair == 1:\n # g_loss = self.combined.test_on_batch([new_imgs_hr, new_imgs_lr], [valid, new_image_features, new_imgs_lr])\n #\n # t_epoch.append(epoch)# + (batch_i / (6000 / batch_size)))\n # t_D_Loss.append(d_loss[0] * 100)\n # t_G_Loss.append(g_loss[2])\n #\n # if batch_i == (self.total_sample / batch_size) - 1:\n # #if batch_i % int((6000 / batch_size) / 4) == 0 and not(batch_i == epoch == 0):\n # test_imgs_hr, test_imgs_lr = self.data_loader.load_data(batch_size, is_testing=False)\n # test_fake_lr = self.generator.predict(test_imgs_hr)\n # test_d_loss_real = self.discriminator.test_on_batch(test_imgs_hr, valid)\n # test_d_loss_fake = self.discriminator.test_on_batch(test_fake_lr, fake)\n # test_d_loss = 0.5 * np.add(test_d_loss_real, test_d_loss_fake)\n # 
test_image_features = self.vgg.predict(test_imgs_hr)\n # if self.f_pair == 1:\n # test_image_features = self.vgg.predict(test_imgs_lr)\n # test_g_loss = self.combined.test_on_batch([test_imgs_hr, test_imgs_lr], [valid, test_image_features, test_imgs_hr])\n # if self.p_pair == 1:\n # test_g_loss = self.combined.test_on_batch([test_imgs_hr, test_imgs_lr], [valid, test_image_features, test_imgs_lr])\n #\n # v_epoch.append(epoch)# + (batch_i / (6000 / batch_size)))\n # v_D_Loss.append(test_d_loss[0] * 100)\n # v_G_Loss.append(test_g_loss[2])\n # # print(\n # # \"GRAPH - [Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f] time: %s \" \\\n # # % (epoch, epochs,\n # # batch_i, self.data_loader.n_batches,\n # # d_loss[0], 100 * d_loss[1], g_loss[2],\n # # elapsed_time))\n # if batch_i == (self.total_sample / batch_size) - 1:\n # #if (batch_i % int((6000 / batch_size) / 10) == 0 and not(batch_i == epoch == 0)) or (batch_i % int((6000 / batch_size) / 4) == 0 and not(batch_i == epoch == 0)):\n # plt.clf()\n # plt.subplot(211)\n # plt.plot(t_epoch, t_D_Loss, c='red', label=\"train_D*100\") #marker='^',\n # plt.plot(v_epoch, v_D_Loss, c='orange', label=\"test_D*100\") #marker='v',\n # plt.legend(loc='upper right')\n # plt.grid()\n # plt.xlabel('epoch')\n # plt.ylabel('D_Loss')\n # plt.subplot(212)\n # plt.plot(t_epoch, t_G_Loss, c='blue', label=\"train_G\") #marker='^',\n # plt.plot(v_epoch, v_G_Loss, c='cyan', label=\"test_G\") #marker='v',\n # plt.legend(loc='upper right')\n # plt.grid()\n # plt.xlabel('epoch')\n # plt.ylabel('G_Loss')\n # plt.savefig('images/%s/fig.png' % self.model_name)\n\n test_size = batch_size\n valid = np.ones((test_size,) + self.disc_patch)\n fake = np.zeros((test_size,) + self.disc_patch)\n new_imgs_hr, new_imgs_lr = self.data_loader.load_data(test_size)\n fake_lr, fake_hr = self.generator.predict(new_imgs_hr)\n d_loss_real = self.discriminator.test_on_batch(new_imgs_hr, valid)\n d_loss_fake = self.discriminator.test_on_batch(fake_hr, fake)\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n new_image_features = []\n new_image_features_lr = []\n if self.f_pair == 0:\n new_image_features = self.vgg.predict(new_imgs_hr)\n new_image_features_lr = self.vgg_lr.predict(new_imgs_lr)\n if self.f_pair == 1:\n new_image_features = self.vgg.predict(new_imgs_lr)\n g_loss = []\n if self.p_pair == 0:\n g_loss = self.combined.test_on_batch([new_imgs_hr, new_imgs_lr],\n [valid, new_image_features, new_image_features])\n if self.p_pair == 1:\n g_loss = self.combined.test_on_batch([new_imgs_hr, new_imgs_lr],\n [valid, new_image_features, new_imgs_lr])\n\n t_epoch.append(epoch) # + (batch_i / (6000 / batch_size)))\n t_D_Loss.append(d_loss[0] * 100)\n t_D_Acc.append(d_loss[1] * 100)\n t_G_Loss.append(g_loss[0])\n # t_D_Loss.append(D_Loss / (self.total_sample / batch_size))\n # t_D_Acc.append(D_Acc / (self.total_sample / batch_size))\n # t_G_Loss.append(G_Loss / (self.total_sample / batch_size))\n # if batch_i % int((6000 / batch_size) / 4) == 0 and not(batch_i == epoch == 0):\n test_imgs_hr, test_imgs_lr = self.data_loader.load_data(test_size, is_testing=False)\n test_fake_lr, test_fake_hr = self.generator.predict(test_imgs_hr)\n test_d_loss_real = self.discriminator.test_on_batch(test_imgs_hr, valid)\n test_d_loss_fake = self.discriminator.test_on_batch(test_fake_hr, fake)\n test_d_loss = 0.5 * np.add(test_d_loss_real, test_d_loss_fake)\n test_image_features = []\n test_image_features_lr = []\n if self.f_pair == 0:\n test_image_features = self.vgg.predict(test_imgs_hr)\n 
test_image_features_lr = self.vgg_lr.predict(test_imgs_lr)\n if self.f_pair == 1:\n test_image_features = self.vgg.predict(test_imgs_lr)\n\n test_g_loss = []\n if self.p_pair == 0:\n test_g_loss = self.combined.test_on_batch([test_imgs_hr, test_imgs_lr],\n [valid, test_image_features, test_image_features])\n if self.p_pair == 1:\n test_g_loss = self.combined.test_on_batch([test_imgs_hr, test_imgs_lr],\n [valid, test_image_features, test_imgs_lr])\n\n v_epoch.append(epoch) # + (batch_i / (6000 / batch_size)))\n v_D_Loss.append(test_d_loss[0] * 100)\n v_D_Acc.append(test_d_loss[1] * 100)\n v_G_Loss.append(test_g_loss[0])\n plt.clf()\n plt.subplot(311)\n plt.plot(t_epoch, t_D_Loss, c='red', label=\"train_D_loss*100\") # marker='^',\n plt.plot(v_epoch, v_D_Loss, c='orange', label=\"test_D_loss*100\") # marker='v',\n plt.legend(loc='upper right')\n plt.grid()\n plt.xlabel('epoch')\n plt.ylabel('D_Loss')\n plt.subplot(312)\n plt.plot(t_epoch, t_D_Acc, c='red', label=\"train_D_Acc*100\") # marker='^',\n plt.plot(v_epoch, v_D_Acc, c='orange', label=\"test_D_Acc*100\") # marker='v',\n plt.legend(loc='upper right')\n plt.grid()\n plt.xlabel('epoch')\n plt.ylabel('D_Acc')\n plt.subplot(313)\n plt.plot(t_epoch, t_G_Loss, c='blue', label=\"train_G_loss\") # marker='^',\n plt.plot(v_epoch, v_G_Loss, c='cyan', label=\"test_G_loss\") # marker='v',\n plt.legend(loc='upper right')\n plt.grid()\n plt.xlabel('epoch')\n plt.ylabel('G_Loss')\n plt.savefig('images/%s/fig.png' % self.model_name)\n if epoch % 5 == 0 and epoch != 0:\n self.save_models(epoch)\n\n def save_models(self, epoch):\n os.makedirs('saved_model/%s' % self.model_name, exist_ok=True)\n self.discriminator.save(\"./saved_model/{}/Discriminator_{}.h5\".format(self.model_name, epoch))\n self.generator.save(\"./saved_model/{}/Generator_{}.h5\".format(self.model_name, epoch))\n\n def sample_images_save(self, epoch, batch_i, batch_size=1):\n os.makedirs('images/%s' % self.model_name, exist_ok=True)\n os.makedirs('images/%s/%d' % (self.model_name, epoch), exist_ok=True)\n\n imgs_hr, imgs_lr = self.data_loader.load_data(batch_size=batch_size, is_testing=True)\n fakes_lr, fakes_hr = self.generator.predict_on_batch(imgs_hr)\n for idx in range(batch_size):\n # Rescale images 0 - 1\n fakes_lr = 0.5 * fakes_lr + 0.5\n fakes_hr = 0.5 * fakes_hr + 0.5\n imgs_hr = 0.5 * imgs_hr + 0.5\n imgs_lr = 0.5 * imgs_lr + 0.5\n\n img_hr = np.asarray(self.np2img(imgs_hr[idx]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n img_lr = np.asarray(self.np2img(imgs_hr[idx]).resize((self.lr_height, self.lr_height), self.interpolation))\n img_lrhr = np.asarray(Image.fromarray(img_lr).resize((self.hr_height, self.hr_height), self.up_interpolation))\n #img_lrhr = np.asarray(self.np2img(imgs_lr[idx]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n fake_lr = np.asarray(self.np2img(fakes_lr[idx]).resize((self.lr_height, self.lr_height), self.up_interpolation))\n fake_hr = np.asarray(self.np2img(fakes_lr[idx]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n\n compare1 = np.hstack((fake_lr, img_lr))\n #compare1 = np.hstack((compare1, compare1))\n compare1 = np.vstack((img_hr, compare1))\n\n compare2 = np.hstack((img_hr, fake_hr))\n compare2 = np.hstack((compare2, img_lrhr))\n\n Image.fromarray(compare1).save('images/%s/%d/%d_0compare1%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(compare2).save('images/%s/%d/%d_0compare2%d.png' % (self.model_name, epoch, batch_i, idx))\n\n\n 
#self.np2img(imgs_hr[idx]).save('images/%s/%d/%d_1hr%d.png' % (self.model_name, epoch, batch_i, idx))\n #self.np2img(fakes_hr[idx]).save('images/%s/%d/%d_2gen_hr%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(img_hr).save('images/%s/%d/%d_1hr%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(fake_hr).save('images/%s/%d/%d_2gen_hr%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(img_lrhr).save('images/%s/%d/%d_3%s_lrup%d.png' % (self.model_name, epoch, batch_i, self.interpolation_str, idx))\n #self.np2img(fakes_lr[idx]).save('images/%s/%d/%d_4gen_lr%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(fake_lr).save('images/%s/%d/%d_4gen_lr%d.png' % (self.model_name, epoch, batch_i, idx))\n Image.fromarray(img_lr).save('images/%s/%d/%d_5%s_lr%d.png' % (self.model_name, epoch, batch_i, self.interpolation_str, idx))\n\n\n def sample_images(self, epoch, batch_i):\n os.makedirs('images/%s' % self.model_name, exist_ok=True)\n os.makedirs('images/%s/%d' % (self.model_name, epoch), exist_ok=True)\n r, c = 2, 3\n\n imgs_hr, imgs_lr = self.data_loader.load_data(batch_size=2, is_testing=True)\n # imgs_lr = []\n # for img in imgs_hr:\n # img_lr = 0.5 * img + 0.5\n # img_lr = np.asarray(self.np2img(img_lr).resize((self.lr_height, self.lr_height), self.interpolation))\n # img_lrhr = np.asarray(Image.fromarray(img_lr).resize((self.hr_height, self.hr_height), self.up_interpolation))\n # imgs_lr.append(img_lrhr)\n # imgs_lr = np.array(imgs_lr) / 127.5 - 1.\n\n fakes_lr, fakes_hr = self.generator.predict(imgs_hr)\n\n # Rescale images 0 - 1\n fakes_lr = 0.5 * fakes_lr + 0.5\n fakes_hr = 0.5 * fakes_hr + 0.5\n imgs_hr = 0.5 * imgs_hr + 0.5\n imgs_lr = 0.5 * imgs_lr + 0.5\n\n # Rescale images 0 - 255\n fakes_lr = (fakes_lr * 255).astype(np.uint8)\n fakes_hr = (fakes_hr * 255).astype(np.uint8)\n imgs_hr = (imgs_hr * 255).astype(np.uint8)\n imgs_lr = (imgs_lr * 255).astype(np.uint8)\n\n # Save generated images and the high resolution originals\n titles = ['Original', 'Generated', self.interpolation_str]\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for row in range(r):\n for col, image in enumerate([imgs_hr, fakes_hr, imgs_lr]):\n axs[row, col].imshow(image[row])\n axs[row, col].set_title(titles[col])\n axs[row, col].axis('off')\n cnt += 1\n\n fig.savefig(\"images/%s/%d/%d.png\" % (self.model_name, epoch, batch_i))\n plt.close()\n\n # Save low resolution images for comparison\n # for i in range(r):\n # fig = plt.figure()\n # plt.imshow(imgs_lr[i])\n # fig.savefig('images/%s/%d/%d_lowres%d.png' % (self.model_name, epoch, batch_i, i))\n # plt.close()\n def test(self, epoch, batch_i):\n os.makedirs('images/%s/best/%d/%d' % (self.model_name, epoch, batch_i), exist_ok=True)\n imgs_hr, imgs_lr = self.data_loader.load_select_data(dataset='./test/*')\n len_hr = len(imgs_hr)\n fakes_lr, fakes_hr = self.generator.predict_on_batch(imgs_hr)\n imgs_hr = (0.5 * imgs_hr + 0.5)\n imgs_lr = (0.5 * imgs_lr + 0.5)\n fakes_lr = (0.5 * fakes_lr + 0.5)\n fakes_hr = (0.5 * fakes_hr + 0.5)\n for i in range(len_hr):\n img_hr = np.asarray(self.np2img(imgs_hr[i]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n img_lr = np.asarray(self.np2img(imgs_hr[i]).resize((self.lr_height, self.lr_height), self.interpolation))\n img_lrhr = np.asarray(Image.fromarray(img_lr).resize((self.hr_height, self.hr_height), self.up_interpolation))\n #img_lrhr = np.asarray(self.np2img(lr[i]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n fake_hr = 
np.asarray(self.np2img(fakes_lr[i]).resize((self.hr_height, self.hr_height), self.up_interpolation))\n fake_lr = np.asarray(self.np2img(fakes_lr[i]).resize((self.lr_height, self.lr_height), self.up_interpolation))\n\n compare1 = np.hstack((fake_lr, img_lr))\n #compare1 = np.hstack((compare1, compare1))\n compare1 = np.vstack((img_hr, compare1))\n Image.fromarray(compare1).save('images/%s/best/%d/%d/%d_%d_%dcompare1.png' % (self.model_name, epoch, batch_i, epoch, batch_i, i))\n compare2 = np.hstack((img_hr, fake_hr))\n compare2 = np.hstack((compare2, img_lrhr))\n Image.fromarray(compare2).save('images/%s/best/%d/%d/%d_%d_%dcompare2.png' % (self.model_name, epoch, batch_i, epoch, batch_i, i))\n print(\"finish\")\n # result = self.generator.evaluate(hr)\n # print(\"Test Loss, %f, %f, %f\" % (result[0], result[1], result[2]))\n # print(result)\n\n def np2img(self, array):\n return Image.fromarray((array*255).astype(np.uint8))\n\nif __name__ == '__main__':\n gan = SRGAN()\n gan.train(epochs=41, batch_size=16, sample_interval=50)\n", "sub_path": "kes_srgan_pixel.py", "file_name": "kes_srgan_pixel.py", "file_ext": "py", "file_size_in_byte": 35190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.config.experimental.list_physical_devices", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.set_memory_growth", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.list_logical_devices", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.logging.get_verbosity", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.logging.set_verbosity", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 58, "usage_type": "attribute"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 87, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 87, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 89, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 89, "usage_type": "name"}, {"api_name": "PIL.Image.HAMMING", "line_number": 91, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 91, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 93, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 97, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 99, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 99, "usage_type": "name"}, {"api_name": "PIL.Image.HAMMING", "line_number": 101, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 101, "usage_type": "name"}, 
{"api_name": "PIL.Image.LANCZOS", "line_number": 103, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 103, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 106, "usage_type": "call"}, {"api_name": "kes_data_loader.DataLoader", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 142, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 150, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 153, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 154, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 167, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 176, "usage_type": "call"}, {"api_name": "keras.applications.VGG19", "line_number": 186, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 191, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 196, "usage_type": "call"}, {"api_name": "keras.applications.VGG19", "line_number": 204, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.image.resize", "line_number": 276, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 276, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 280, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 280, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clip", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 282, "usage_type": "attribute"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 286, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 289, "usage_type": "call"}, {"api_name": "keras.layers.Add", "line_number": 291, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 296, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 297, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 303, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 304, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 305, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 306, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 307, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 309, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 312, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 314, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 327, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 333, "usage_type": "call"}, {"api_name": "keras.layers.Add", "line_number": 334, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 341, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 345, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 
347, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 355, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 356, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 362, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 373, "usage_type": "call"}, {"api_name": "keras.layers.advanced_activations.LeakyReLU", "line_number": 374, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 377, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 383, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 418, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 439, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 439, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 529, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 557, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 578, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 578, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 579, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 579, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 580, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 580, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 581, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 581, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 582, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 582, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 583, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 583, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 584, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 584, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 585, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 585, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 586, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 586, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 587, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 587, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 588, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 588, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 589, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 589, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 590, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 590, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 591, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 591, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 592, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 592, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 593, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 593, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 594, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 594, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 596, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 596, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 597, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 597, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 598, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 598, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 599, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 599, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 600, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 600, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 605, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 610, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 611, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 622, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 623, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 624, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 624, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 624, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 627, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 629, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 631, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 633, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 634, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 636, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 636, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 637, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 637, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 642, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 642, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 643, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 643, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 644, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 644, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 646, "usage_type": "call"}, {"api_name": "PIL.Image", 
"line_number": 646, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 647, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 647, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 651, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 652, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 673, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 674, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 675, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 676, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 680, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 680, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 690, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 690, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 708, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 709, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 710, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 710, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 710, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 712, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 713, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 717, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 718, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 718, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 719, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 720, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 721, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 721, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 728, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 728, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 728, "usage_type": "attribute"}]} +{"seq_id": "478415220", "text": "from collections import Counter\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\nfrom corehq.apps import es\nfrom corehq.apps.domain.dbaccessors import get_doc_count_in_domain_by_class\nfrom corehq.apps.dump_reload.couch.dump import DOC_PROVIDERS\nfrom corehq.apps.dump_reload.couch.id_providers import DocTypeIDProvider\nfrom corehq.apps.dump_reload.sql.dump import get_querysets_to_dump, allow_form_processing_queries\nfrom corehq.apps.dump_reload.util import get_model_label\nfrom corehq.apps.hqmedia.models import CommCareMultimedia\nfrom corehq.apps.users.dbaccessors.all_commcare_users import get_web_user_count, get_mobile_user_count\nfrom corehq.apps.users.models import CommCareUser\nfrom corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state\nfrom corehq.form_processor.models import XFormInstanceSQL, CommCareCaseSQL\nfrom corehq.sql_db.config import get_sql_db_aliases_in_use\nfrom corehq.util.couch import get_document_class_by_doc_type\nfrom corehq.util.markup import shell_red\n\nDOC_TYPE_MAPPING = {\n 'xforminstance': 'XFormInstance',\n 'submissionerrorlog': 
'SubmissionErrorLog',\n 'xformduplicate': 'XFormDuplicate',\n 'xformerror': 'XFormError',\n 'xformarchived': 'XFormArchived',\n}\n\n\nclass Command(BaseCommand):\n help = \"Print database stats for a domain. Use in conjunction with 'compare_docs_with_es'.\"\n args = ''\n\n def add_arguments(self, parser):\n parser.add_argument('--csv', action='store_true', default=False, dest='csv',\n help='Write output in CSV format.')\n\n def handle(self, domain, **options):\n csv = options.get('csv')\n\n couch_counts = _map_doc_types(_get_couchdb_counts(domain))\n sql_counts = _map_doc_types(_get_sql_counts(domain))\n es_counts = _map_doc_types(_get_es_counts(domain))\n all_doc_types = set(couch_counts) | set(sql_counts) | set(es_counts)\n\n output_rows = []\n for doc_type in sorted(all_doc_types, key=lambda d: d.lower()):\n couch = couch_counts.get(doc_type, '')\n sql = sql_counts.get(doc_type, '')\n es = es_counts.get(doc_type, '')\n output_rows.append((doc_type, couch, sql, es))\n\n if csv:\n self.output_csv(output_rows)\n else:\n self.output_table(output_rows)\n\n def output_table(self, output_rows):\n template = \"{:<50} | {:<20} | {:<20} | {:<20}\"\n self._write_output(template, output_rows)\n\n def output_csv(self, output_rows):\n template = \"{},{},{},{}\\n\"\n self._write_output(template, output_rows, with_header_divider=False)\n\n def _write_output(self, template, output_rows, with_header_divider=True, with_color=True):\n self.stdout.write(template.format('Doc Type', 'Couch', 'SQL', 'ES'))\n if with_header_divider:\n self.stdout.write(template.format('-' * 50, *['-' * 20] * 3))\n for doc_type, couch_count, sql_count, es_count in output_rows:\n row_template = template\n couch_dff = couch_count and couch_count != es_count\n sql_diff = sql_count and sql_count != es_count\n if with_color and es_count and (couch_dff or sql_diff):\n row_template = shell_red(template)\n self.stdout.write(row_template.format(doc_type, couch_count, sql_count, es_count))\n\n\ndef _get_couchdb_counts(domain):\n couch_db_counts = Counter()\n for provider in DOC_PROVIDERS:\n if isinstance(provider, DocTypeIDProvider):\n for doc_type in provider.doc_types:\n if doc_type == 'CommCareUser':\n continue # want to split deleted\n doc_class = get_document_class_by_doc_type(doc_type)\n count = get_doc_count_in_domain_by_class(domain, doc_class)\n couch_db_counts.update({doc_type: count})\n\n for row in CommCareMultimedia.get_db().view('hqmedia/by_domain', key=domain, include_docs=False):\n couch_db_counts.update(['CommCareMultimedia'])\n\n mobile_user_count = get_mobile_user_count(domain)\n couch_db_counts.update({\n 'WebUser': get_web_user_count(domain),\n 'CommCareUser': mobile_user_count,\n 'CommCareUser-Deleted': get_doc_count_in_domain_by_class(domain, CommCareUser) - mobile_user_count\n })\n\n # this is very slow, excluding for now\n # for _, doc_ids in SyncLogIDProvider().get_doc_ids(domain):\n # couch_db_counts['SyncLog'] += len(doc_ids)\n #\n return couch_db_counts\n\n\ndef _get_doc_counts_for_couch_db(couch_db, domain):\n doc_types = couch_db.view(\n \"by_domain_doc_type_date/view\",\n startkey=[domain],\n endkey=[domain, {}],\n reduce=True,\n group=True,\n group_level=2\n )\n\n return Counter({row['key'][1]: row['value'] for row in doc_types})\n\n\n@allow_form_processing_queries()\ndef _get_sql_counts(domain):\n counter = Counter()\n for model_class, queryset in get_querysets_to_dump(domain, []):\n if model_class in (User, XFormInstanceSQL, CommCareCaseSQL):\n continue # User is very slow, others we want to break out\n 
counter[get_model_label(model_class)] += queryset.count()\n\n counter += _get_sql_forms_by_doc_type(domain)\n counter += _get_sql_cases_by_doc_type(domain)\n return counter\n\n\ndef _get_es_counts(domain):\n counter = Counter()\n for es_query in (es.CaseES, es.FormES, es.UserES, es.AppES, es.LedgerES, es.GroupES):\n counter += _get_index_counts(es_query(), domain)\n\n return counter\n\n\ndef _get_index_counts(es_query, domain):\n return Counter(\n es_query\n .remove_default_filters()\n .filter(es.filters.term('domain', domain))\n .terms_aggregation('doc_type', 'doc_type')\n .size(0)\n .run()\n .aggregations.doc_type.counts_by_bucket()\n )\n\n\ndef _map_doc_types(counter):\n return Counter({\n DOC_TYPE_MAPPING.get(doc_type, doc_type): count\n for doc_type, count in counter.items()\n })\n\n\ndef _get_sql_forms_by_doc_type(domain):\n counter = Counter()\n for db_alias in get_sql_db_aliases_in_use():\n queryset = XFormInstanceSQL.objects.using(db_alias).filter(domain=domain)\n for doc_type, state in doc_type_to_state.items():\n counter[doc_type] += queryset.filter(state=state).count()\n\n where_clause = 'state & {0} = {0}'.format(XFormInstanceSQL.DELETED)\n counter['XFormInstance-Deleted'] += queryset.extra(where=[where_clause]).count()\n\n return counter\n\n\ndef _get_sql_cases_by_doc_type(domain):\n counter = Counter()\n for db_alias in get_sql_db_aliases_in_use():\n queryset = CommCareCaseSQL.objects.using(db_alias).filter(domain=domain)\n counter['CommCareCase'] += queryset.filter(deleted=False).count()\n counter['CommCareCase-Deleted'] += queryset.filter(deleted=True).count()\n\n return counter\n", "sub_path": "corehq/apps/dump_reload/management/commands/print_domain_stats.py", "file_name": "print_domain_stats.py", "file_ext": "py", "file_size_in_byte": 6898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 30, "usage_type": "name"}, {"api_name": "corehq.apps.es", "line_number": 50, "usage_type": "name"}, {"api_name": "corehq.apps.es", "line_number": 51, "usage_type": "name"}, {"api_name": "corehq.util.markup.shell_red", "line_number": 75, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 80, "usage_type": "call"}, {"api_name": "corehq.apps.dump_reload.couch.dump.DOC_PROVIDERS", "line_number": 81, "usage_type": "name"}, {"api_name": "corehq.apps.dump_reload.couch.id_providers.DocTypeIDProvider", "line_number": 82, "usage_type": "argument"}, {"api_name": "corehq.util.couch.get_document_class_by_doc_type", "line_number": 86, "usage_type": "call"}, {"api_name": "corehq.apps.domain.dbaccessors.get_doc_count_in_domain_by_class", "line_number": 87, "usage_type": "call"}, {"api_name": "corehq.apps.hqmedia.models.CommCareMultimedia.get_db", "line_number": 90, "usage_type": "call"}, {"api_name": "corehq.apps.hqmedia.models.CommCareMultimedia", "line_number": 90, "usage_type": "name"}, {"api_name": "corehq.apps.users.dbaccessors.all_commcare_users.get_mobile_user_count", "line_number": 93, "usage_type": "call"}, {"api_name": "corehq.apps.users.dbaccessors.all_commcare_users.get_web_user_count", "line_number": 95, "usage_type": "call"}, {"api_name": "corehq.apps.domain.dbaccessors.get_doc_count_in_domain_by_class", "line_number": 97, "usage_type": "call"}, {"api_name": "corehq.apps.users.models.CommCareUser", "line_number": 97, "usage_type": "argument"}, {"api_name": "collections.Counter", "line_number": 117, "usage_type": "call"}, {"api_name": 
"collections.Counter", "line_number": 122, "usage_type": "call"}, {"api_name": "corehq.apps.dump_reload.sql.dump.get_querysets_to_dump", "line_number": 123, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 124, "usage_type": "name"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL", "line_number": 124, "usage_type": "name"}, {"api_name": "corehq.form_processor.models.CommCareCaseSQL", "line_number": 124, "usage_type": "name"}, {"api_name": "corehq.apps.dump_reload.util.get_model_label", "line_number": 126, "usage_type": "call"}, {"api_name": "corehq.apps.dump_reload.sql.dump.allow_form_processing_queries", "line_number": 120, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 134, "usage_type": "call"}, {"api_name": "corehq.apps.es.CaseES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "corehq.apps.es", "line_number": 135, "usage_type": "name"}, {"api_name": "corehq.apps.es.FormES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "corehq.apps.es.UserES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "corehq.apps.es.AppES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "corehq.apps.es.LedgerES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "corehq.apps.es.GroupES", "line_number": 135, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 142, "usage_type": "call"}, {"api_name": "corehq.apps.es.filters.term", "line_number": 145, "usage_type": "call"}, {"api_name": "corehq.apps.es.filters", "line_number": 145, "usage_type": "attribute"}, {"api_name": "corehq.apps.es", "line_number": 145, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 154, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 161, "usage_type": "call"}, {"api_name": "corehq.sql_db.config.get_sql_db_aliases_in_use", "line_number": 162, "usage_type": "call"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL.objects.using", "line_number": 163, "usage_type": "call"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL", "line_number": 163, "usage_type": "name"}, {"api_name": "corehq.form_processor.backends.sql.dbaccessors.doc_type_to_state.items", "line_number": 164, "usage_type": "call"}, {"api_name": "corehq.form_processor.backends.sql.dbaccessors.doc_type_to_state", "line_number": 164, "usage_type": "name"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL.DELETED", "line_number": 167, "usage_type": "attribute"}, {"api_name": "corehq.form_processor.models.XFormInstanceSQL", "line_number": 167, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 174, "usage_type": "call"}, {"api_name": "corehq.sql_db.config.get_sql_db_aliases_in_use", "line_number": 175, "usage_type": "call"}, {"api_name": "corehq.form_processor.models.CommCareCaseSQL.objects.using", "line_number": 176, "usage_type": "call"}, {"api_name": "corehq.form_processor.models.CommCareCaseSQL.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "corehq.form_processor.models.CommCareCaseSQL", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "218783342", "text": "import cv2\nimport numpy as np\nimport os\n\n##########################################################################\n# from 
https://stackoverflow.com/questions/28717054/calculating-sharpness-of-an-image\ndef getBlurValue(image):\n    canny = cv2.Canny(image, 50,250)\n    return np.mean(canny)\n##########################################################################\ndef getSimilarity(img1,img2):\n    return cv2.matchTemplate(frame, last_image, cv2.TM_CCOEFF_NORMED)[0][0]\n##########################################################################\n# theses parameters can be changed\nproba_similarity_match=0.9\nvideo_file='video_test1.mp4'\nmin_sharpness=4\nout_folder='sharp_still_frames'\nstill_duration=5 # 5 frames still needed\n\nprint(\"Extracting sharp still frames from video...\")\n# set video file path of input video with name and extension\nvid_capture = cv2.VideoCapture(video_file)\nif vid_capture.isOpened()==False:\n    print(\"Video File not found: \"+ video_file)\n    quit()\n\nif not os.path.exists(out_folder):\n    os.makedirs(out_folder)\n\nlast_image=None\nindex=-1\nnb_frames_still=0\nstill_image_ref=None\nmust_save=False\nlast_sharpness=0\nwhile(True):\n    index+=1\n    ret,frame = vid_capture.read()\n    if not ret:\n        break\n\n    if last_image is None:\n        last_image=frame\n\n    sharpness=getBlurValue(frame)\n    sharpness_good=sharpness>min_sharpness\n    still_good=getSimilarity(frame, last_image)>proba_similarity_match\n\n    if sharpness_good and still_good:\n        nb_frames_still+=1\n        must_save=True\n        if last_sharpness<sharpness:\n            image_to_save=frame\n    else:\n        if must_save and nb_frames_still>still_duration:\n            name = './' + out_folder +'/frame_' + str(index) + '.jpg'\n            print ('Saving...' + name)\n            cv2.imwrite(name, image_to_save)\n            must_save=False\n            nb_frames_still=0\n\n    last_image=frame\n    last_sharpness=sharpness\n\nprint(\"End of extraction.\")", "sub_path": "python/video_extract_sharp_still_frames.py", "file_name": "video_extract_sharp_still_frames.py", "file_ext": "py", "file_size_in_byte": 1941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.Canny", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "511550136", "text": "import os\nimport gitlab\nfrom csv import DictWriter\n\ngl = gitlab.Gitlab(\n    \"\",\n    private_token=\"\",\n    ssl_verify=\"\",\n)\n\n\ndef list_projects():\n    res_project = []\n    projects = gl.projects.list(all=True)\n    for project in projects:\n        res_project.append(\n            {\n                \"Project_ID\": project.id,\n                \"Name\": project.name,\n                \"Creator_ID\": project.creator_id,\n                \"Namespace\": project.name_with_namespace,\n                \"Created_at\": project.created_at,\n                \"Last_Activity\": project.last_activity_at,\n            }\n        )\n    return res_project\n\n\ndef list_users():\n    users = gl.users.list()\n    res_user = []\n    for user in users:\n        res_user.append(\n            {\n                \"ID\": user.id,\n                \"name\": user.name,\n                \"two_factor_enabled\": user.two_factor_enabled,\n            }\n        )\n    return res_user\n\n\nprojects = list_projects()\nwith open(\"gitlab-projects.csv\", \"w\") as file:\n    headers = [\n        \"Project_ID\",\n        \"Name\",\n        \"Creator_ID\",\n        \"Namespace\",\n        \"Created_at\",\n
\"Last_Activity\",\n ]\n text = DictWriter(file, fieldnames=headers)\n text.writeheader()\n for p in projects:\n text.writerow(p)\n\n\nusers = list_users()\nwith open(\"gitlab-users.csv\", \"w\") as file:\n headers = [\"ID\", \"name\", \"two_factor_enabled\"]\n text = DictWriter(file, fieldnames=headers)\n text.writeheader()\n for u in users:\n text.writerow(u)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1557, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "gitlab.Gitlab", "line_number": 5, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 53, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "540438098", "text": "\nfrom github_webhook import Webhook\nfrom flask import Flask\nfrom noises.handler import Handler\nimport rq_dashboard\n\n\napp = Flask(__name__) # Standard Flask app\napp.config.from_object(rq_dashboard.default_settings)\napp.register_blueprint(rq_dashboard.blueprint, url_prefix=\"/rq\")\nwebhook = Webhook(app) # Defines '/postreceive' endpoint\nhandlers = Handler()\n\n\n@app.route(\"/\") # Standard Flask endpoint\ndef hello_world():\n return \"Hello, World!\"\n\n\n@webhook.hook(event_type=\"status\") # Defines a handler for the 'push' event\ndef on_status(data):\n handlers.status(data, True)\n\n@webhook.hook(event_type=\"star\")\ndef on_star(data):\n handlers.star(data, True)\n\n@webhook.hook(event_type=\"pull_request_review_comment\")\ndef on_pull_request_review_comment(data):\n handlers.pull_request_review_comment(data, True)\n\n@webhook.hook(event_type=\"push\")\ndef on_push(data):\n handlers.push(data, True)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "rq_dashboard.default_settings", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rq_dashboard.blueprint", "line_number": 10, "usage_type": "attribute"}, {"api_name": "github_webhook.Webhook", "line_number": 11, "usage_type": "call"}, {"api_name": "noises.handler.Handler", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "266923624", "text": "import nltk.classify\nfrom tkinter import *\nimport joblib\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport pandas as pd\nimport numpy as np\n\ndef convert(polarity):\n\tif polarity == 1 :\n\t\treturn 'positive'\n\telif polarity == 0:\n\t\treturn 'neutral'\n\telse:\n\t\treturn \"negative\"\n\n\nprint(\"Designing UI\")\nroot = Tk()\nroot.wm_title('Sentiment Analysis Application')\n\ntop_frame = Frame(root)\ntop_frame.pack()\n\nbottom_frame = Frame(root)\nbottom_frame.pack(side=BOTTOM)\n\nl1 = Label(top_frame, text='Enter a review:')\nl1.pack(side=LEFT)\n\nw = Text(top_frame, height=3 )\nw.pack(side=LEFT)\n\nprint(\"UI COMPLETE\")\nprint()\ndef main_op():\n review_spirit = w.get('1.0',END)\n clf = joblib.load(\"svm.pkl\")\n\n demo2 = ('review is ' + str(convert(clf.predict([review_spirit]))))\n l2 = Label(bottom_frame, text=demo2)\n l2.pack()\n\nbutton = Button(bottom_frame, text='Analyse', command=main_op )\nbutton.pack(side=BOTTOM)\n\nroot.mainloop()", "sub_path": 
"sentiment-analysis/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "joblib.load", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "116137762", "text": "\"\"\"\nNotice : 神兽保佑 ,测试一次通过\n// \n// ┏┛ ┻━━━━━┛ ┻┓\n// ┃       ┃\n// ┃   ━   ┃\n// ┃ ┳┛  ┗┳ ┃\n// ┃       ┃\n// ┃   ┻   ┃\n// ┃       ┃\n// ┗━┓   ┏━━━┛\n// ┃   ┃ Author: somewheve\n// ┃   ┃ Datetime: 2019/6/13 下午7:54 ---> 无知即是罪恶\n// ┃   ┗━━━━━━━━━┓\n// ┃        ���┓\n// ┃     ┏┛\n// ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛\n// ┃ ┫ ┫ ┃ ┫ ┫\n// ┗━┻━┛ ┗━┻━┛\n//\n\"\"\"\nfrom collections import defaultdict\n\nfrom ctpbee.constant import *\nfrom ctpbee.interface.ctp_rohon.lib import *\nfrom ctpbee.interface.func import *\n\n\nclass RHTdApi(RohonTdApi):\n \"\"\"\"\"\"\n\n def __init__(self, app_signal):\n \"\"\"Constructor\"\"\"\n super(RHTdApi, self).__init__()\n self.app_signal = app_signal\n self.gateway_name = \"ctp_rohon\"\n\n self.reqid = 0\n\n self.connect_status = False\n self.login_status = False\n self.auth_staus = False\n self.login_failed = False\n\n self.userid = \"\"\n self.password = \"\"\n self.brokerid = 0\n self.auth_code = \"\"\n self.product_info = \"\"\n self.appid = \"\"\n\n self.frontid = 0\n self.sessionid = 0\n\n self.order_data = []\n self.trade_data = []\n self.positions = {}\n\n self.choices = list(range(50000, 1000000))\n self.order_ref = 0\n\n self.symbol_exchange_mapping = {}\n self.sysid_orderid_map = {}\n self.open_cost_dict = defaultdict(dict)\n\n self.position_init_flag = False\n self.instrunment_init_flag = False\n self.position_instrument_mapping = dict()\n\n self.init_status = False\n self.contact_data = {}\n self.local_order_id = []\n\n @property\n def td_status(self):\n return self.login_status\n\n def on_event(self, type, data):\n event = Event(type=type, data=data)\n signal = getattr(self.app_signal, f\"{type}_signal\")\n signal.send(event)\n\n def onFrontConnected(self):\n \"\"\"\"\"\"\n self.connect_status = True\n self.on_event(type=EVENT_LOG, data=\"交易连接成功\")\n\n if self.auth_code:\n self.authenticate()\n else:\n self.login()\n\n def onFrontDisconnected(self, reason: int):\n \"\"\"\"\"\"\n self.connect_status = False\n self.login_status = False\n self.on_event(type=EVENT_LOG, data=f\"交易连接断开,原因{reason}\")\n\n def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n if not error['ErrorID']:\n self.authStatus = True\n self.on_event(type=EVENT_LOG, data=\"交易服务器验证成功\")\n self.login()\n else:\n error['detail'] = \"交易服务器验证失败\"\n self.on_event(type=EVENT_ERROR, data=error)\n\n def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n if not error[\"ErrorID\"]:\n self.frontid = data[\"FrontID\"]\n self.sessionid = data[\"SessionID\"]\n self.login_status = True\n self.on_event(type=EVENT_LOG, data=\"交易登录成功\")\n\n # Confirm settlement\n req = {\n \"BrokerID\": self.brokerid,\n \"InvestorID\": self.userid\n }\n self.reqid += 1\n\n self.reqSettlementInfoConfirm(req, self.reqid)\n else:\n self.login_failed = True\n error['detail'] = \"交易登录失败\"\n self.on_event(type=EVENT_ERROR, data=error)\n\n def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n order_ref = data[\"OrderRef\"]\n order_id = f\"{self.frontid}_{self.sessionid}_{order_ref}\"\n symbol = data[\"InstrumentID\"]\n exchange = symbol_exchange_map[symbol]\n order = OrderData(\n symbol=symbol,\n 
exchange=exchange,\n order_id=order_id,\n direction=DIRECTION_CTP2VT[data[\"Direction\"]],\n offset=OFFSET_CTP2VT[data[\"CombOffsetFlag\"]],\n price=data[\"LimitPrice\"],\n volume=data[\"VolumeTotalOriginal\"],\n status=Status.REJECTED,\n gateway_name=self.gateway_name\n )\n self.on_event(type=EVENT_ORDER, data=order)\n error['detail'] = \"交易委托失败\"\n self.on_event(type=EVENT_ERROR, data=error)\n\n def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n error['detail'] = \"交易撤单失败\"\n self.on_event(type=EVENT_ERROR, data=error)\n\n def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n pass\n\n def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\n Callback of settlment info confimation.\n \"\"\"\n self.on_event(type=EVENT_LOG, data=\"结算信息确认成功\")\n self.reqid += 1\n self.reqQryInstrument({}, self.reqid)\n\n def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n if not data:\n return\n key = f\"{data['InstrumentID'], data['PosiDirection']}\"\n position = self.positions.get(key, None)\n try:\n if not position:\n position = PositionData(\n symbol=data[\"InstrumentID\"],\n exchange=symbol_exchange_map[data[\"InstrumentID\"]],\n direction=DIRECTION_CTP2VT[data[\"PosiDirection\"]],\n gateway_name=self.gateway_name\n )\n self.positions[key] = position\n # For SHFE position data update\n if position.exchange == Exchange.SHFE:\n if data[\"YdPosition\"] and not data[\"TodayPosition\"]:\n # position.yd_volume = data[\"Position\"]\n position.__set_hole__(\"yd_volume\", data[\"Position\"])\n # For other exchange position data update\n else:\n # position.yd_volume = data[\"Position\"] - data[\"TodayPosition\"]\n position.__set_hole__(\"yd_volume\", data[\"Position\"] - data[\"TodayPosition\"])\n\n # Get contract size (spread contract has no size value)\n size = symbol_size_map.get(position.symbol, 0)\n\n # Calculate previous position cost\n cost = position.price * position.volume * size\n\n # Update new position volume\n # position.volume += data[\"Position\"]\n position.__set_hole__(\"volume\", position.volume + data[\"Position\"])\n # position.pnl += data[\"PositionProfit\"]\n position.__set_hole__(\"pnl\", position.pnl + data[\"PositionProfit\"])\n\n # Calculate average position price\n if position.volume and size:\n cost += data[\"PositionCost\"]\n # position.price = cost / (position.volume * size)\n position.__set_hole__(\"price\", cost / (position.volume * size))\n self.open_cost_dict[position.symbol][\"size\"] = size\n\n # Get frozen volume\n if position.direction == Direction.LONG:\n # position.frozen += data[\"ShortFrozen\"]\n position.__set_hole__(\"frozen\", position.frozen + data[\"ShortFrozen\"])\n\n if position.volume and size:\n if not self.open_cost_dict[position.symbol].get(\"long\"):\n self.open_cost_dict[position.symbol][\"long\"] = 0\n\n self.open_cost_dict[position.symbol][\"long\"] += data['OpenCost']\n # position.open_price = self.open_cost_dict[position.symbol][\"long\"] / (\n # position.volume * size)\n position.__set_hole__(\"open_price\", self.open_cost_dict[position.symbol][\"long\"] / (\n position.volume * size))\n # 先算出当前的最新价格\n current_price = position.pnl / \\\n (size * position.volume) + position.price\n\n # position.float_pnl = (current_price - position.open_price) * size * position.volume\n position.__set_hole__(\"float_pnl\", (current_price - position.open_price) * size * position.volume)\n\n 
else:\n # position.frozen += data[\"LongFrozen\"]\n position.__set_hole__(\"frozen\", position.frozen + data[\"LongFrozen\"])\n\n if position.volume and size:\n if not self.open_cost_dict[position.symbol].get(\"short\"):\n self.open_cost_dict[position.symbol][\"short\"] = 0\n\n self.open_cost_dict[position.symbol][\"short\"] += data['OpenCost']\n # position.open_price = self.open_cost_dict[position.symbol][\"short\"] / (\n # position.volume * size)\n position.__set_hole__(\"open_price\", self.open_cost_dict[position.symbol][\"short\"] / (\n position.volume * size))\n current_price = position.price - \\\n position.pnl / (size * position.volume)\n # position.float_pnl = (position.open_price - current_price) * size * position.volume\n position.__set_hole__(\"float_pnl\", (position.open_price - current_price) * size * position.volume)\n\n except KeyError:\n pass\n\n if last:\n for position in self.positions.values():\n self.on_event(type=EVENT_POSITION, data=position)\n self.position_instrument_mapping[position.local_symbol] = False\n self.positions.clear()\n self.open_cost_dict.clear()\n self.position_init_flag = True\n\n def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\"\"\"\n account = AccountData(\n accountid=data[\"AccountID\"],\n balance=data[\"Balance\"],\n frozen=data[\"FrozenMargin\"] +\n data[\"FrozenCash\"] + data[\"FrozenCommission\"],\n gateway_name=self.gateway_name,\n available=data[\"Available\"]\n )\n self.on_event(type=EVENT_ACCOUNT, data=account)\n if self.instrunment_init_flag and self.position_init_flag and not self.init_status:\n self.reqid += 1\n self.init_status = True\n self.reqQryDepthMarketData({}, self.reqid)\n self.on_event(type=EVENT_INIT_FINISHED, data=True)\n\n def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):\n \"\"\"\n Callback of instrument query.\n \"\"\"\n product = PRODUCT_CTP2VT.get(data[\"ProductClass\"], None)\n try:\n end_delivery_date = datetime.strptime(\n data[\"EndDelivDate\"], \"%Y%m%d\"),\n start_delivery_date = datetime.strptime(\n data[\"StartDelivDate\"], \"%Y%m%d\"),\n open_date = datetime.strptime(data['OpenDate'], \"%Y%m%d\"),\n is_trading = bool(data[\"IsTrading\"]),\n create_date = datetime.strptime(data['CreateDate'], \"%Y%m%d\")\n except ValueError:\n end_delivery_date = None\n start_delivery_date = None\n open_date = None\n is_trading = None\n create_date = None\n\n if product:\n try:\n # For option only\n if product == Product.OPTION:\n option_underlying = data[\"UnderlyingInstrID\"],\n option_type = OPTIONTYPE_CTP2VT.get(\n data[\"OptionsType\"], None),\n option_strike = data[\"StrikePrice\"],\n option_expiry = datetime.strptime(\n data[\"ExpireDate\"], \"%Y%m%d\"),\n else:\n option_strike: float = 0\n option_underlying: str = \"\"\n option_type: OptionType = None\n option_expiry: datetime = None\n contract = ContractData(\n symbol=data[\"InstrumentID\"],\n exchange=EXCHANGE_CTP2VT[data[\"ExchangeID\"]],\n name=data[\"InstrumentName\"],\n product=product,\n max_market_order_volume=data['MaxMarketOrderVolume'],\n min_market_order_volume=data['MinMarketOrderVolume'],\n max_limit_order_volume=data['MaxLimitOrderVolume'],\n min_limit_order_volume=data['MaxLimitOrderVolume'],\n size=data[\"VolumeMultiple\"],\n pricetick=data[\"PriceTick\"],\n delivery_month=data['DeliveryMonth'],\n delivery_year=data['DeliveryYear'],\n long_margin_ratio=data['LongMarginRatio'],\n short_margin_ratio=data['ShortMarginRatio'],\n combination_type=data['CombinationType'],\n 
gateway_name=self.gateway_name,\n end_delivery_date=end_delivery_date,\n start_delivery_date=start_delivery_date,\n open_date=open_date,\n is_trading=is_trading,\n create_date=create_date,\n option_strike=option_strike,\n option_underlying=option_underlying,\n option_type=option_type,\n option_expiry=option_expiry\n )\n except KeyError as e:\n import warnings\n warnings.warn(f\"未预料到的合约问题 错误信息: {e}\")\n return\n self.symbol_exchange_mapping[data[\"InstrumentID\"]\n ] = EXCHANGE_CTP2VT[data[\"ExchangeID\"]]\n\n self.on_event(type=EVENT_CONTRACT, data=contract)\n\n symbol_exchange_map[contract.symbol] = contract.exchange\n symbol_name_map[contract.symbol] = contract.name\n symbol_size_map[contract.symbol] = contract.size\n\n if last:\n # 请求计算所有合约所用到的具体数据\n self.instrunment_init_flag = True\n self.on_event(EVENT_LOG, data=\"合约信息查询成功\")\n\n for data in self.order_data:\n self.onRtnOrder(data)\n self.order_data.clear()\n for data in self.trade_data:\n self.onRtnTrade(data)\n self.trade_data.clear()\n\n def onRtnOrder(self, data: dict):\n \"\"\"\n Callback of order status update.\n \"\"\"\n symbol = data[\"InstrumentID\"]\n exchange = symbol_exchange_map.get(symbol, \"\")\n if not exchange:\n self.order_data.append(data)\n return\n\n frontid = data[\"FrontID\"]\n sessionid = data[\"SessionID\"]\n order_ref = data[\"OrderRef\"]\n if int(order_ref) > self.order_ref:\n self.order_ref = int(order_ref) + 1\n order_id = f\"{frontid}_{sessionid}_{order_ref}\"\n if data['OrderPriceType'] in ORDERTYPE_VT2CTP.values():\n ordertype = ORDERTYPE_CTP2VT[data[\"OrderPriceType\"]]\n else:\n ordertype = \"non_support\"\n is_local = True if int(self.frontid) == int(frontid) and int(\n self.sessionid) == int(sessionid) else False\n\n if is_local:\n self.local_order_id.append(order_id)\n\n order = OrderData(\n symbol=symbol,\n exchange=exchange,\n order_id=order_id,\n type=ordertype,\n direction=DIRECTION_CTP2VT[data[\"Direction\"]],\n offset=OFFSET_CTP2VT[data[\"CombOffsetFlag\"]],\n price=data[\"LimitPrice\"],\n volume=data[\"VolumeTotalOriginal\"],\n traded=data[\"VolumeTraded\"],\n status=STATUS_CTP2VT[data[\"OrderStatus\"]],\n time=data[\"InsertTime\"],\n gateway_name=self.gateway_name,\n is_local=is_local\n )\n self.on_event(type=EVENT_ORDER, data=order)\n self.sysid_orderid_map[data[\"OrderSysID\"]] = order_id\n\n def onRtnTrade(self, data: dict):\n \"\"\"\n Callback of trade status update.\n \"\"\"\n symbol = data[\"InstrumentID\"]\n exchange = symbol_exchange_map.get(symbol, \"\")\n if not exchange:\n self.trade_data.append(data)\n return\n\n order_id = self.sysid_orderid_map[data[\"OrderSysID\"]]\n is_local = order_id in self.local_order_id\n\n trade = TradeData(\n symbol=symbol,\n exchange=exchange,\n order_id=order_id,\n tradeid=data[\"TradeID\"],\n direction=DIRECTION_CTP2VT[data[\"Direction\"]],\n offset=OFFSET_CTP2VT[data[\"OffsetFlag\"]],\n price=data[\"Price\"],\n volume=data[\"Volume\"],\n time=data[\"TradeTime\"],\n is_local=is_local,\n gateway_name=self.gateway_name\n )\n self.on_event(type=EVENT_TRADE, data=trade)\n\n def connect(self, info: dict):\n \"\"\"\n Start connection to server.\n \"\"\"\n self.userid = info.get(\"userid\")\n self.password = info.get(\"password\")\n self.brokerid = info.get(\"brokerid\")\n self.auth_code = info.get(\"auth_code\")\n self.appid = info.get(\"appid\")\n self.product_info = info.get(\"product_info\")\n\n subscribe_info = info.get(\"subscribe_topic\", (0, 0)) # 默认采用(0, 0)的方式进行订阅\n\n if not self.connect_status:\n path = get_folder_path(\n 
self.gateway_name.lower() + f\"/{self.userid}\")\n self.createFtdcTraderApi(str(path) + \"\\\\Td\")\n self.subscribePrivateTopic(subscribe_info[0])\n self.subscribePublicTopic(subscribe_info[1])\n self.registerFront(info.get(\"td_address\"))\n self.init()\n else:\n self.authenticate()\n\n def authenticate(self):\n \"\"\"\n Authenticate with auth_code and appid.\n \"\"\"\n req = {\n \"UserID\": self.userid,\n \"BrokerID\": self.brokerid,\n \"AuthCode\": self.auth_code,\n \"AppID\": self.appid\n }\n if self.product_info:\n req[\"UserProductInfo\"] = self.product_info\n\n self.reqid += 1\n self.reqAuthenticate(req, self.reqid)\n\n def onRspQryTransferBank(self, data, error, reqid, last: bool):\n print(\"transfer callback: \", data)\n\n def onRspQryTransferSerial(self, data, error, reqid, last):\n # 查询流水回调\n print(\"serial: \", data, \"error\", error)\n\n def onRspQryAccountregister(self, data, error, reqid, last):\n print(\"query account register callback: data\", data, \"error\")\n\n def query_transfer_serial(self, req: TransferSerialRequest):\n \"\"\" 查询转账流水 \"\"\"\n self.reqid += 1\n reqd = {\n \"BankID\": req.bank_id,\n \"CurrencyID\": req.currency_id\n }\n self.ReqQryTransferSerial(reqd, self.reqid)\n\n def query_bank_account_money(self, req: AccountBanlanceRequest):\n \"\"\" 查询银行余额 \"\"\"\n self.reqid += 1\n reqd = {\n \"BankID\": req.bank_id,\n # \"BankBranchID\": req.bank_branch_id,\n \"BrokerID\": self.brokerid,\n # \"BrokerBranchID\": req.broker_branch_id,\n \"BankAccount\": req.bank_account,\n \"BankPassWord\": req.bank_password,\n \"AccountID\": self.userid,\n \"Password\": self.password,\n \"CurrencyID\": req.currency_id,\n \"SecuPwdFlag\": THOST_FTDC_BPWDF_BlankCheck\n }\n self.reqQueryBankAccountMoneyByFuture(reqd, self.reqid)\n\n def query_account_register(self, req: AccountRegisterRequest):\n \"\"\" 查询银行账户 \"\"\"\n self.reqid += 1\n reqd = \\\n {\n \"BrokerID\": self.brokerid,\n \"AccountID\": self.userid,\n \"BankID\": req.bank_id,\n # \"BankBranchID\": req.bank_branch_id,\n \"CurrencyID\": req.currency_id\n }\n self.reqQryAccountregister(reqd, self.reqid)\n\n def transfer(self, req: TransferRequest, type):\n \"\"\" 银行和证券互转 \"\"\"\n self.reqid += 1\n reqd = {\n \"BankID\": req.bank_id,\n # \"BankBranchID\": req.bank_branch_id,\n \"BrokerID\": self.brokerid,\n # \"BrokerBranchID\": req.broker_branch_id,\n \"BankAccount\": req.bank_account,\n \"BankPassWord\": req.band_password,\n \"AccountID\": self.userid,\n \"Password\": self.password,\n \"CurrencyID\": req.currency_id,\n \"TradeAmount\": req.trade_account,\n \"SecuPwdFlag\": THOST_FTDC_BPWDF_BlankCheck,\n }\n if type == \"to_bank\":\n self.reqFromBankToFutureByFuture(reqd, self.reqid)\n if type == \"to_trade\":\n self.ReqFromFutureToBankByFuture(reqd, self.reqid)\n\n def login(self):\n \"\"\"\n Login into server.\n \"\"\"\n if self.login_failed:\n return\n\n req = {\n \"UserID\": self.userid,\n \"Password\": self.password,\n \"BrokerID\": self.brokerid,\n \"AppID\": self.appid\n }\n if self.product_info:\n req[\"UserProductInfo\"] = self.product_info\n\n self.reqid += 1\n self.reqUserLogin(req, self.reqid)\n\n def onRspQryDepthMarketData(self, data, error, reqid, last):\n try:\n exchange = self.symbol_exchange_mapping[data['InstrumentID']]\n except KeyError:\n return\n market = LastData(\n symbol=data['InstrumentID'],\n exchange=exchange,\n pre_open_interest=data['PreOpenInterest'],\n open_interest=data['OpenInterest'],\n volume=data['Volume'],\n last_price=data['LastPrice']\n )\n self.on_event(type=EVENT_LAST, 
data=market)\n self.position_instrument_mapping[market.symbol] = True\n if last:\n # 回调初始化完成\n if False not in self.position_instrument_mapping.values():\n self.init_status = True\n self.on_event(type=EVENT_INIT_FINISHED, data=True)\n\n def request_market_data(self, req: object):\n \"\"\" 请求市场数据 \"\"\"\n\n self.reqid += 1\n self.reqQryDepthMarketData({}, self.reqid)\n\n def send_order(self, req: OrderRequest, **kwargs):\n \"\"\"\n Send new order.\n \"\"\"\n self.order_ref += 1\n ctp_req = {\n \"InstrumentID\": req.symbol,\n \"LimitPrice\": req.price,\n \"VolumeTotalOriginal\": int(req.volume),\n \"OrderPriceType\": ORDERTYPE_VT2CTP.get(req.type, \"\"),\n \"Direction\": DIRECTION_VT2CTP.get(req.direction, \"\"),\n \"CombOffsetFlag\": OFFSET_VT2CTP.get(req.offset, \"\"),\n \"OrderRef\": str(self.order_ref),\n \"InvestorID\": self.userid,\n \"UserID\": self.userid,\n \"BrokerID\": self.brokerid,\n \"CombHedgeFlag\": THOST_FTDC_HF_Speculation,\n \"ContingentCondition\": THOST_FTDC_CC_Immediately,\n \"ForceCloseReason\": THOST_FTDC_FCC_NotForceClose,\n \"IsAutoSuspend\": 0,\n \"TimeCondition\": THOST_FTDC_TC_GFD,\n \"VolumeCondition\": THOST_FTDC_VC_AV,\n \"MinVolume\": 1,\n \"ExchangeID\": req.exchange.value if isinstance(req.exchange, Exchange) else req.exchange\n }\n\n if req.type == OrderType.FAK:\n ctp_req[\"OrderPriceType\"] = THOST_FTDC_OPT_LimitPrice\n ctp_req[\"TimeCondition\"] = THOST_FTDC_TC_IOC\n ctp_req[\"VolumeCondition\"] = THOST_FTDC_VC_AV\n elif req.type == OrderType.FOK:\n ctp_req[\"OrderPriceType\"] = THOST_FTDC_OPT_LimitPrice\n ctp_req[\"TimeCondition\"] = THOST_FTDC_TC_IOC\n ctp_req[\"VolumeCondition\"] = THOST_FTDC_VC_CV\n\n self.reqid += 1\n self.reqOrderInsert(ctp_req, self.reqid)\n order_id = f\"{self.frontid}_{self.sessionid}_{self.order_ref}\"\n order = req._create_order_data(order_id, self.gateway_name)\n self.on_event(type=EVENT_ORDER, data=order)\n return order.local_order_id\n\n def cancel_order(self, req: CancelRequest, **kwargs):\n \"\"\"\n Cancel existing order.\n \"\"\"\n frontid, sessionid, order_ref = req.order_id.split(\"_\")\n ctp_req = {\n \"InstrumentID\": req.symbol,\n \"OrderRef\": order_ref,\n \"FrontID\": int(frontid),\n \"SessionID\": int(sessionid),\n \"ActionFlag\": THOST_FTDC_AF_Delete,\n \"BrokerID\": self.brokerid,\n \"InvestorID\": self.userid,\n \"ExchangeID\": req.exchange.value\n }\n\n self.reqid += 1\n return self.reqOrderAction(ctp_req, self.reqid)\n\n def query_account(self):\n \"\"\"\n Query account balance data.\n \"\"\"\n self.reqid += 1\n return self.reqQryTradingAccount({}, self.reqid)\n\n def query_position(self):\n \"\"\"\n Query position holding data.\n \"\"\"\n if not symbol_exchange_map:\n return\n req = {\n \"BrokerID\": self.brokerid,\n \"InvestorID\": self.userid\n }\n\n self.reqid += 1\n return self.reqQryInvestorPosition(req, self.reqid)\n\n def close(self):\n \"\"\"\"\"\"\n if self.connect_status:\n print(\"release Trading API\")\n p = self.exit()\n", "sub_path": "ctpbee/interface/ctp_rohon/td_api.py", "file_name": "td_api.py", "file_ext": "py", "file_size_in_byte": 25740, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.defaultdict", "line_number": 64, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 343, "usage_type": "call"}]} +{"seq_id": "211332437", "text": "import pickle\nfrom math import *\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n#The analytical solution of F comes from the code in the 
Test_Example.py file from the project website, and so\n#The Test_Example file is imported such that we can use it to find F in the code.\n\n#Method as specified in the assignment file. Has an extra input value d, as this is necessary for some of the problems.\ndef fredholm_rhs (xc, F, d):\n '''Set up the RHS of the system\n INPUT :\n xc : defines collocation points\n F : function defining the geological survey measurements\n OUTPUT:\n vector defining the RHS of the system'''\n Nc = len(xc)\n b = np.zeros(Nc)\n for i in range(Nc):\n b[i] = F(xc[i], d)\n return b\n\n\n#Method as specified in the assignment file. Has an extra input value d, as this is necessary for some of the problems.\ndef fredholm_lhs (xc, xs, xq, w, K, d):\n '''\n Set up the LHS of the system\n INPUT:\n xc: collocation points\n xs: source points\n xq, w: numerical quadrature\n K: integral kernel\n OUTPUT:\n matrix defining the LHS of the system'''\n Nc = len(xc)\n Ns = len(xs)\n A = np.zeros((Nc, Ns))\n #FIXIT : implement the function!\n #Triple for loop to generate the matrix A.\n for i in range(Nc):\n for j in range(Ns):\n for k in range(len(xq)):\n A[i][j] += K(xc[i], xq[k], d) * w[k] * Lagrange_Basis(j, xq[k], xs, Ns)\n return A\n\n#Generates chebyshev's interpolation nodes with k as the input value n, in the interval [0, 1].\ndef Chebyshev(n):\n a = []\n for i in range(1, n+1):\n a.append(0.5 - 0.5*cos(pi*(2*i-1)/(2*n)))\n return a\n\n#For the last problem it seemed like it might be relevant to have the option to generate Chebyshev's interpolation\n#nodes in the general range a, b instead of the specific interval [0, 1], so this method is meant to do this.\ndef Chebyshev2(n, a, b):\n r = []\n for i in range(1, n+1):\n r.append(0.5*(a+b)+0.5*(a-b)*cos(pi*(2*i-1)/(2*n)))\n return r\n\n#Returns nodes and weights for the trapezoid method in the interval [0, 1] for use in problem 3.\ndef Trapezoid(n):\n xq = []\n w = []\n for i in range(n+1):\n xq.append(i/n)\n if i == 0 or i == n:\n w.append(0.5/n)\n else:\n w.append(1/n)\n return xq, w\n\n#Returns nodes and weights for the legendre gauss quadrature in the interval [0, 1] for use in problem 4-7.\ndef Legendre(n):\n x1, w1 = np.polynomial.legendre.leggauss(n)\n xq = [0.5*(x+1) for x in x1]\n w = [0.5*x for x in w1]\n return xq, w\n\n#Returns nodes and weights for the legendre gauss quadrature in the generic interval [a, b] for use in problem 8.\ndef Legendre2(n, a, b):\n x1, w1 = np.polynomial.legendre.leggauss(n)\n xq = [((b-a)*x+b+a)/2 for x in x1]\n w = [0.5*(b-a)*x for x in w1]\n return xq, w\n\n#Returns Lj(x) for use in creating the matrix A in most of the problems.\ndef Lagrange_Basis (j, xq, xs, ran):\n L = 1\n for i in range(ran):\n if j != i:\n L *= (xq-xs[i])/(xs[j]-xs[i])\n return L\n\n#Returns the analytical density of p for all the x-values in the list a.\ndef Density(a):\n p = []\n for i in range(len(a)):\n p.append(sin(3*pi*a[i])*exp(-2*a[i]))\n return p\n\n#For use in problem 3 and 4. 
Takes as input the list p with the densities at various points, xc and xs which are lists of collocation\n#and source points, K and F which are functions, d, the depth, and method, which is either Legendre or Trapezoid depending on\n#whether we want the graphs for problem 3 or 4.\ndef Gen_Error(p, xc, xs, K, F, method, d):\n x = []\n y = []\n b = fredholm_rhs(xc, F, d)\n #This for loop loops through Nq = 2^i for i=1, .., 8 and then appends 2^i, inf norm given Nq = 2^i to the lists x and y\n #And returns these 2 lists.\n for i in range(1, 20):\n print(i)\n xq, w = method(10*i)\n x.append(10*i)\n A = fredholm_lhs(xc, xs, xq, w, K, d)\n Ap = np.dot(A, p).tolist()\n r = []\n for j in range(len(Ap)):\n r.append(abs(Ap[j]-b[j]))\n y.append(max(r))\n return x, y\n\n#Takes as input x, a list of x-values, and y, which is a list containing multiple lists yi that contain y-values corresponding\n#to the x-values, and x to yi for each yi in y.\ndef Plot_func(x, y):\n for yi in y:\n plt.plot(x, yi)\n plt.yscale('log')\n plt.show()\n\n#Used in problem 5 - takes as input minimum and maximum values for Nc given Nc=Ns. Iterates through these and for each value\n#Solves the linear system Ap=b to find p, and calculates the inf-norm of p minus the analytical solution.\n#Returns x, y where x is the Nq-values, and y contains the corresponding values of the inf-norm.\ndef Gen_Error_p(start, end, K, F, method, d):\n x = []\n y = []\n for i in range(start, end+1):\n print(i)\n xc = Chebyshev(i)\n b = fredholm_rhs(xc, F, d)\n xq, w = method(i**2)\n A = fredholm_lhs(xc, xc, xq, w, K, d)\n p = np.linalg.solve(A, b)\n p2 = Density(xc)\n r = []\n for j in range(len(p)):\n r.append(abs(p[j]-p2[j]))\n x.append(i)\n y.append(max(r))\n return x, y\n\n#Generates b and the perturbed version of b at a given depth d, and plots the both in the same graph.\ndef Gen_Perturbed(n, F, d):\n x = []\n y = []\n xc = Chebyshev(n)\n b = fredholm_rhs(xc, F, d)\n b2 = [x*(1+np.random.uniform(-10**-3, 10**-3)) for x in b]\n plt.plot(xc, b, label = 'Not perturbed')\n plt.plot(xc, b2, label = 'Perturbed')\n plt.legend()\n plt.show()\n\n#Given a depth d, and Nc = n this method solves the system Ap = b for b not perturbed and b perturbed and plots these 2\n#together with the analytical solution of p in the same plot.\ndef Gen_plot_perturbed(n, F, K, method, d):\n xc = Chebyshev(n)\n b = fredholm_rhs(xc, F, d)\n b2 = [x*(1+np.random.uniform(-10**-3, 10**-3)) for x in b]\n xq, w = method(10*n)\n A = fredholm_lhs(xc, xc, xq, w, K, d)\n p1 = np.linalg.solve(A, b)\n p2 = np.linalg.solve(A, b2)\n p3 = Density(xc)\n plt.plot(xc, p1, label = 'Not perturbed')\n plt.plot(xc, p2, label = 'Perturbed')\n plt.plot(xc, p3, label = 'Analytical solution')\n plt.legend()\n plt.show()\n\n#This method calculates a perturbed version of b, and then solves the system (ATA + Lambda*I) * p = ATb\n#for Lambda = 10^i for i=-14, -13, ..., 0, 1 and for each lambda plots the numerical estimate and the analytical solution\n#in the same graph. 
Afterwards it also plots the error as a function of lambda in a loglog plot to use for trying to find the\n#optimal lambda given this specific perturbation.\ndef Tikhonov(n, F, K, method, d):\n xc = Chebyshev(n)\n b = fredholm_rhs(xc, F, d)\n xq, w = method(10*n)\n A = fredholm_lhs(xc, xc, xq, w, K, d)\n p1 = Density(xc)\n b2 = [x*(1+np.random.uniform(-10**-3, 10**-3)) for x in b]\n e = []\n l = []\n for i in range(-14, 2):\n print(i)\n lhs = np.dot(A.T, A) + np.dot(10**i, np.identity(n))\n rhs = np.dot(A.T, b2)\n p = np.linalg.solve(lhs, rhs)\n plt.plot(xc, p, label = 'Numerical estimate')\n plt.plot(xc, p1, label = 'Analytical solution')\n plt.legend()\n plt.show()\n r = []\n for j in range(len(p)):\n r.append(abs(p[j]-p1[j]))\n e.append(max(r))\n l.append(10**i)\n plt.plot(l, e, label = 'Error as function of lambda, d=0.25')\n plt.legend()\n plt.xscale('log')\n plt.yscale('log')\n plt.show()\n\n#This method takes as input a file with values of xc and F(xc) and for lambda = 10^i for i = -14, -13, ..., 0, 1 and plots\n#the graph given this lambda. Unfortunately we don't here have an analytical solution to plot against, so we will have to guess\n#what the optimal plot looks like. Generally this was done by observing when the plots were very similar for multiple different\n#lambda values in a row, as it seems as though the graph is stable for a while before and after the \"optimal\" lambda value usually.\n#Generally this tended to be around lambda = 10^-5 or 10^-4.\ndef Reconstruct_Density(file, K):\n f = open(file, 'rb')\n npzfile = np.load(f)\n #sinus = lambda x: sin(5*pi*x)\n #y = [sinus(x) for x in npzfile['xc']]\n xs = Chebyshev2(len(npzfile['xc']), npzfile['a'], npzfile['b'])\n xq, w = Legendre2(len(npzfile['xc']**2), npzfile['a'], npzfile['b'])\n A = fredholm_lhs(npzfile['xc'], xs, xq, w, K, npzfile['d'])\n r = [npzfile['xc']]\n for i in range(-14, 2):\n print(i)\n lhs = np.dot(A.T, A) + np.dot(10**i, np.identity(len(npzfile['xc'])))\n rhs = np.dot(A.T, npzfile['F'])\n p = np.linalg.solve(lhs, rhs)\n plt.plot(npzfile['xc'], p)\n r.append(p)\n #plt.plot(npzfile['xc'], y, label = 'The function y=sin(5*Pi*x)')\n # plt.legend()\n plt.show()\n return r\n\n#This code snippet runs the code necessary for problem 3 and 4 (Line with Legendre in it is problem 4 and line with Trapezoid is problem 3).\n'''a = Chebyshev(40)\np = Density(a)\nK = lambda x, y, d: d * (d**2 + (y-x)**2)**(-3/2)\nF = pickle.load( open( \"F.pkl\", \"rb\" ) )\nx, y1 = Gen_Error(p, a, a, K, F, Legendre, 0.025)\nx, y2 = Gen_Error(p, a, a, K, F, Trapezoid, 0.025)\ny = [y1, y2]\nPlot_func(x, y)'''\n\n#This code snippet runs problem 5 and plots the 3 graphs at different depths as a function of Nc.\n'''K = lambda x, y, d: d * (d**2 + (y-x)**2)**(-3/2)\nF = pickle.load( open( \"F.pkl\", \"rb\" ) )\nx, y1 = Gen_Error_p(5, 30, K, F, Legendre, 0.025)\nx, y2 = Gen_Error_p(5, 30, K, F, Legendre, 0.25)\nx, y3 = Gen_Error_p(5, 30, K, F, Legendre, 2.5)\ny = [y1, y2, y3]\nPlot_func(x, y)'''\n\n#This code snippet generates an exact and perturbed graph for problem 6 (that tended to just be basically the same graph from\n#the perspective of a viewer as the perturbations of 0.1% or less weren't enough to make for very visible differences.\nF = pickle.load( open( \"F.pkl\", \"rb\" ) )\nGen_Perturbed(30, F, 0.025)\n\n#This code snippet is used for problem 6 and 7 - uncomment gen_plot_perturbed for problem 6 and tikhonov for problem 7.\n#Input desired d instead of 2.5 if you want some other depth.\nK = lambda x, y, d: d * (d**2 + 
(y-x)**2)**(-3/2)\nF = pickle.load( open( \"F.pkl\", \"rb\" ) )\nGen_plot_perturbed(30, F, K, Legendre, 0.25)\nTikhonov(30, F, K, Legendre, 0.25)\n\n#The first 4 lines will plot a bunch of graphs and such for each of the 3 files separately to try to find the optimal lambdas.\n#The last half will iterate through the different plots with the values from each of the 3 files to have all the graphs in the\n#same plot. In each case it will start at lambda = 10^-14 and end at lambda =10^1, printing the exponent i at each step.\n\n\nK = lambda x, y, d: d * (d**2 + (y-x)**2)**(-3/2)\nr1 = Reconstruct_Density('q8_1.npz', K)\nr2 = Reconstruct_Density('q8_2.npz', K)\nr3 = Reconstruct_Density('q8_3.npz', K)\nfor i in range(1, len(r1)):\n print(-15+i)\n plt.plot(r1[0], r1[i], label = 'First file')\n plt.plot(r2[0], r2[i], label = 'Second file')\n plt.plot(r3[0], r3[i], label = 'Third file')\n plt.legend()\n plt.show()\n", "sub_path": "Prosjekt1/oj.py", "file_name": "oj.py", "file_ext": "py", "file_size_in_byte": 11095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.polynomial.legendre.leggauss", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.polynomial", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.polynomial.legendre.leggauss", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.polynomial", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.linalg.solve", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 145, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.linalg.solve", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.linalg.solve", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 175, 
"usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 200, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 234, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, 
{"api_name": "pickle.load", "line_number": 263, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}]} +{"seq_id": "90805623", "text": "import socket\nimport json\n\nclass Server():\n\t\"\"\"TCP Server\"\"\"\n\n\tHOST = 'localhost'\n\tPORT = 9999\n\trollodex = {}\n\n\tdef __init__(self):\n\t\tself.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.server.bind((self.HOST, self.PORT))\n\t\tself.server.listen(5)\n\t\twhile True:\n\t\t\tconnection, address = self.server.accept()\n\t\t\tprint('Connected by', address)\n\t\t\tprint(connection)\n\t\t\t# client_socket = client_thread(connection)\n\t\t\t# client_socket.run()\n\t\t\t# send connect and add into dictionary\n\t\t\tself.register_client(address, connection)\n\n\t\t# while True:\n\t\t# \tdata = connection.recv(1024)\n\t\t# \tif not data: break\n\t\t# \tconnection.send(data)\n\t\t# self.register_client(address, connection)\n\t\t# connection.close()\n\n\tdef register_client(self, address, connection):\n\t\thost = address[0]\n\t\tport = address[1]\n\t\tself.rollodex.update({\n\t\t\t(\"username\" + str(port)): {\n\t\t\t\t\"HOST\": host,\n\t\t\t\t\"PORT\": port,\n\t\t\t\t\"status\": \"active\"\n\t\t\t}\n\t\t})\n\t\tprint(self.rollodex)\n\t\tself.return_active_clients(connection)\n\t# \treturn the rollodex as data to who is active\n\n\tdef return_active_clients(self, connection):\n\t\t# data = connection.recv(1024)\n\t\t# print(data)\n\t\tencoded = json.dumps(self.rollodex).encode('utf-8')\n\t\t# if data == b'1':\n\t\tconnection.send(encoded)\n\t\t# return self.rollodex\n\n\n\tdef register_clientelles(self):\n\t\t# CONNECTION\n\t\t# CONNECTION = new socket obj usable to send & recv data on connection\n\t\t# \n\t\tconnection, address = self.server.accept()\n\t\t# ADDRESS\n\t\t# ADDRESS = address bound to the socket on the other end of connection\n\t\t# ('127.0.0.1', 51196)\n\t\tprint('Connected by', address)\n\t\tprint(connection)\n\t\twhile True:\n\t\t\tdata = connection.recv(1024)\n\t\t\tif not data: break\n\t\t\tconnection.send(data)\n\t\tconnection.close()\n\nlaunch = Server()", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "socket.socket", "line_number": 12, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "393878376", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport boto3\nimport sys\nimport 
configparser\n\ndef main():\n try:\n # ——————————————————————————————————\n # ドキュメントのディレクトリを引数で取得\n # ——————————————————————————————————\n repository = sys.argv[1]\n document = sys.argv[2]\n test_flg = sys.argv[3]\n pdf_name = sys.argv[4]\n path = repository + '/' + document\n\n # ——————————————————————————————————\n # S3の設定を取得\n # ——————————————————————————————————\n inifile = configparser.ConfigParser()\n # 全体設定\n inifile.read(repository + '/config.ini')\n\n BUCKET = inifile.get('S3', 'BUCKET')\n KEY = inifile.get('S3', 'KEY')\n SECRET = inifile.get('S3', 'SECRET')\n REGION = inifile.get('S3', 'REGION')\n\n # ——————————————————————————————————\n # S3へアップロード\n # ——————————————————————————————————\n s3 = boto3.resource('s3',\n aws_access_key_id= KEY,\n aws_secret_access_key= SECRET,\n region_name= REGION)\n # ファイル送信\n ret = s3.meta.client.upload_file(path + '/' + pdf_name, BUCKET ,document + '/' + pdf_name)\n\n obj = s3.Object(BUCKET ,document + '/' + pdf_name)\n new_pdf_name = pdf_name + '?versionId=' + obj.version_id\n print(new_pdf_name)\n return 0\n except Exception as e:\n print(type(e))\n print(e)\n return 1\n\nif __name__ == '__main__':\n sys.exit(main())\n", "sub_path": "py/s3_upload_pdf.py", "file_name": "s3_upload_pdf.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 22, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "534919675", "text": "#!/usr/bin/evn python\n\"\"\"\nBlockyTalky - User code sender\n\nThis daemon periodically checks for new user code, then POSTs it \nto code_receiver.py, which is running on a remote server.\n\"\"\"\n\nimport os\nimport time\nimport socket\nimport requests\n\nos.nice(3)\n\nclass CodeSender(object):\n\tCHECK_INTERVAL = 60\n\n\tdef __init__(self):\n\t\tself.unitname = socket.gethostname()\n\t\tself.headers = {'unit-name': self.unitname}\n\n\tdef start(self):\n\t\tself.schedule_code_check()\n\n\tdef schedule_code_check(self):\n\t\t#print \"Scheduling a new check in %d seconds\" % self.__class__.CHECK_INTERVAL\n\t\ttime.sleep(self.__class__.CHECK_INTERVAL)\n\t\tself.check_code_and_reschedule()\n\n\tdef check_code_and_reschedule(self):\n\t\tself.check_code()\n\t\tself.schedule_code_check()\n\n\tdef check_code(self):\n\t\t\"\"\"\n\t\tChecks for code that has not yet been POSTed to the server (in usercode/), \n\t\tand sends it. 
After the code is sent, files are locally moved to sentcode/, \n\t\totherwise files remain in the directory as the POST failed.\n\t\t\"\"\"\n\n\t\tos.chdir('/home/pi/blockytalky/usercode/')\n\t\t\n\t\tfor file in os.listdir('/home/pi/blockytalky/usercode/'):\n\t\t\tfo = open(file, \"rb\")\n\t\t\tcode = fo.read()\n\t\t\tfo.close()\n\t\t\ttry:\n\t\t\t\trequest = requests.post(\"http://104.131.249.150:5000\", data=code, headers=self.headers)\n\t\t\t\tnewfile = \"/home/pi/blockytalky/sentcode/\" + str(file)\n\t\t\t\tos.rename(file, newfile)\n\t\t\texcept:\n\t\t\t\t# POST failed, leave file, try again next loop\n\t\t\t\tpass\n\nif __name__ == \"__main__\":\n\tcs = CodeSender()\n\tcs.start()", "sub_path": "backend/code_uploader.py", "file_name": "code_uploader.py", "file_ext": "py", "file_size_in_byte": 1474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.nice", "line_number": 14, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "331796852", "text": "import pygame\r\nimport random\r\nimport sys\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\nFRAMES = 25\r\n\r\nclass Point:\r\n def __init__(self):\r\n self.radius = 10\r\n self.divisor = 9\r\n\r\n self.lower_bound = 1 / self.divisor\r\n self.upper_bound = (self.divisor - 1) / self.divisor\r\n\r\npoint = Point()\r\n\r\n# Color Definitions\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\n\r\ndots = []\r\ndot_size = 1 # In pixels\r\n\r\nA = (round(WIDTH / 2), round(point.lower_bound * HEIGHT))\r\ndisplay_a = (A[0], A[1] - 30)\r\n\r\nB = (round(point.lower_bound * WIDTH), round(point.upper_bound * HEIGHT))\r\ndisplay_b = (B[0] - 30, B[1])\r\n\r\nC = (round(point.upper_bound * WIDTH), round(point.upper_bound * HEIGHT))\r\ndisplay_c = (C[0] + 30, C[1])\r\n\r\n\r\ndef text_objects(text, font):\r\n textSurface = font.render(text, True, black)\r\n return textSurface, textSurface.get_rect()\r\n\r\n\r\ndef message_display(text, pos):\r\n largeText = pygame.font.Font('freesansbold.ttf', 40)\r\n TextSurf, TextRect = text_objects(text, largeText)\r\n TextRect.center = (pos)\r\n display.blit(TextSurf, TextRect)\r\n\r\nstart = (400, 300) # Change starting point here\r\ncurrent_position = start\r\n\r\n\r\n# Pygame init\r\npygame.init()\r\ndisplay = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Chaos Game\")\r\nclock = pygame.time.Clock()\r\n\r\nwhile True:\r\n\r\n # Handle Events\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n random_number = random.randint(1, 6)\r\n\r\n # The formula for the middle point of a line drawn between two points\r\n # is M = ((x1 + x2) / 2, (y1 + y2) / 2)\r\n\r\n if random_number in [1, 2]:\r\n # A[0] == x; A[1] == y\r\n x = round((A[0] + current_position[0]) / 2)\r\n y = round((A[1] + current_position[1]) / 2)\r\n\r\n current_position = (x, y)\r\n dots.append(current_position)\r\n\r\n elif random_number in [3, 4]:\r\n # B[0] == x; B[1] == y\r\n x = round((B[0] + current_position[0]) / 2)\r\n y = 
round((B[1] + current_position[1]) / 2)\r\n\r\n current_position = (x, y)\r\n dots.append(current_position)\r\n\r\n elif random_number in [5, 6]:\r\n # C[0] == x; C[1] == y\r\n x = round((C[0] + current_position[0]) / 2)\r\n y = round((C[1] + current_position[1]) / 2)\r\n\r\n current_position = (x, y)\r\n dots.append(current_position)\r\n\r\n display.fill(white)\r\n\r\n for dot in dots:\r\n pygame.draw.circle(display, black, dot, dot_size)\r\n\r\n message_display(\"A\", display_a)\r\n pygame.draw.circle(display, red, A, point.radius)\r\n\r\n message_display(\"B\", display_b)\r\n pygame.draw.circle(display, red, B, point.radius)\r\n\r\n message_display(\"C\", display_c)\r\n pygame.draw.circle(display, red, C, point.radius)\r\n\r\n # Starting Position\r\n pygame.draw.circle(display, green, start, point.radius)\r\n\r\n\r\n pygame.display.update()\r\n clock.tick(FRAMES)\r\n\r\nif __name__ == \"__main__\":\r\n game_loop() # Start the game loop\r\n pygame.quit()\r\n", "sub_path": "triangle.py", "file_name": "triangle.py", "file_ext": "py", "file_size_in_byte": 3076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pygame.font.Font", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "639276590", "text": "import re\n\nfrom django.db.models import Q, Min\nfrom OverApp.models import RoomInfo, HotelAvailability\n\ndef normalize_query(query_string,\n findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n 
normspace=re.compile(r'\\s{2,}').sub):\n\n return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]\n\n\ndef build_query(query_string, search_fields):\n ''' Returns a query, that is a combination of Q objects. That combination\n aims to search keywords within a model by testing the given search fields.\n\n '''\n query = None # Query to search for every search term\n terms = normalize_query(query_string)\n for term in terms:\n or_query = None # Query to search for a given term in each field\n for field_name in search_fields:\n q = Q(**{\"%s__icontains\" % field_name: term})\n\n if or_query:\n or_query = or_query |q\n else:\n or_query = q\n\n\n if query:\n query = query & or_query\n else:\n query = or_query\n return query\n\ndef get_lowest_price(pk):\n # include dates in the future\n\n default_price = RoomInfo.objects.filter(hotel_id=pk).aggregate(lowest=Min('ratePerNight'))\n season_price = HotelAvailability.objects.filter(hotel_id=pk).aggregate(lowest=Min('ratePerNight'))\n \n if not season_price.get('lowest'):\n return default_price.get('lowest')\n if default_price.get('lowest') < season_price.get('lowest') and default_price.get('lowest') != 0:\n return default_price.get('lowest')\n else:\n return season_price.get('lowest')\n\n\ndef generic_search(request, model, fields, query):\n \"\"\"\n \"\"\"\n\n query_string = query\n\n if not query_string:\n entries = model.objects.all()\n else:\n entry_query = build_query(query_string, fields)\n entries = model.objects.filter(entry_query)\n\n for entry in entries:\n entry.lowest = get_lowest_price(entry.pk)\n amenities = entry.hotelAmens.split(',')\n entry.hotelAmens = amenities\n rooms = entry.hotelRoomTypes.split(',')\n entry.hotelRoomTypes = rooms\n services = entry.hotelServices.split(',')\n entry.hotelServices = services\n\n\n return entries", "sub_path": "OverApp/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 23, "usage_type": "call"}, {"api_name": "OverApp.models.RoomInfo.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "OverApp.models.RoomInfo.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "OverApp.models.RoomInfo", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.Min", "line_number": 40, "usage_type": "call"}, {"api_name": "OverApp.models.HotelAvailability.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "OverApp.models.HotelAvailability.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "OverApp.models.HotelAvailability", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.Min", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "401188337", "text": "\nfrom scipy.sparse import csr_matrix\nfrom scipy.linalg import eig\nfrom numpy import empty as empty_matrix\n\ntry:\n from numpy import VisibleDeprecationWarning\n import warnings\n warnings.filterwarnings(\"ignore\", category=VisibleDeprecationWarning)\nexcept ImportError:\n pass\n\ndef pagerank_weighted_scipy(graph, damping=0.85):\n adjacency_matrix = build_adjacency_matrix(graph)\n probability_matrix = build_probability_matrix(graph)\n\n pagerank_matrix = damping * adjacency_matrix.todense() + (1 - damping) 
* probability_matrix\n \n vals, vecs = eig(pagerank_matrix, left=True, right=False)\n return process_results(graph, vecs)\n\n\ndef build_adjacency_matrix(graph):\n row = []\n col = []\n data = []\n nodes = graph.nodes()\n length = len(nodes)\n\n for i in range(length):\n current_node = nodes[i]\n neighbors_sum = sum(graph.edge_weight((current_node, neighbor)) for neighbor in graph.neighbors(current_node))\n for j in range(length):\n edge_weight = float(graph.edge_weight((current_node, nodes[j])))\n if i != j and edge_weight != 0:\n row.append(i)\n col.append(j)\n data.append(edge_weight / neighbors_sum)\n\n return csr_matrix((data,(row,col)), shape=(length,length))\n\n\ndef build_probability_matrix(graph):\n dimension = len(graph.nodes())\n matrix = empty_matrix((dimension,dimension))\n\n probability = 1 / float(dimension)\n matrix.fill(probability)\n\n return matrix\n\n\ndef process_results(graph, vecs):\n scores = {}\n for i, node in enumerate(graph.nodes()):\n scores[node] = abs(vecs[i][0])\n\n return scores\n", "sub_path": "pagerank_weighted.py", "file_name": "pagerank_weighted.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "warnings.filterwarnings", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.VisibleDeprecationWarning", "line_number": 9, "usage_type": "name"}, {"api_name": "scipy.linalg.eig", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "107033103", "text": "import itertools\nfrom math import radians, cos, sin, asin, sqrt\nimport warnings\n\nimport networkx as nx\n\nfrom dtk.tools.migration.GraphGenerator import GraphGenerator\n\n\nclass GeoGraphGenerator(GraphGenerator):\n \"\"\"\n A geographical graph generator (connectivity depends on the distance between nodes).\n A future refactor may have a number graph generator types implementing a generic interface GraphTopoGenerator.\n\n \"\"\"\n\n def __init__(self, migration_network_file_path: str, demographics_file_path: str, migration_radius=2):\n \"\"\"\n\n Args:\n migration_network_file_path: The path to migration network file.\n demographics_file_path: The path to the demographics file.\n migration_radius: How far people would travel on foot in units of neighborhood hops; 1 is equal to the 8 adjacent nodes, 2 is equal to 24 adjacent nodes.\n \"\"\"\n super().__init__(migration_network_file_path, demographics_file_path)\n warnings.warn(\"GeoGraphGenerator is deprecated.\", DeprecationWarning)\n\n self.migration_radius = migration_radius\n\n def generate_graph(self) -> nx.Graph():\n \"\"\"\n Generate a networkx graph based on distances between vertices.\n\n Returns:\n A networkx graph.\n \"\"\"\n\n G = nx.Graph()\n G.position = {}\n G.population = {}\n G.name = {}\n\n for node_id, properties in self.node_properties.items():\n G.add_node(node_id)\n G.name[properties[3]] = node_id\n G.population[node_id] = properties[2]\n G.position[node_id] = (properties[0], properties[1]) # (x,y) for matplotlib\n\n # add an edge between any two nodes distanced less than max_kms away\n\n for n in itertools.combinations(G.nodes(), 2):\n distance = self.get_haversine_distance(G.position[n[0]][0], G.position[n[0]][1], G.position[n[1]][0],\n G.position[n[1]][1])\n if not self.migration_radius or distance < self.migration_radius:\n G.add_edge(n[0], n[1], 
weight=distance)\n\n # add edge based on adjacency matrix\n for node_id, node_links in self.adjacency_list.items():\n for node_link_id, w in node_links.items():\n distance = self.get_haversine_distance(G.position[int(node_id)][0], G.position[int(node_id)][1],\n G.position[int(node_link_id)][0],\n G.position[int(node_link_id)][1])\n G.add_edge(int(node_id), int(node_link_id), weight=distance * w)\n\n self.graph = G\n\n return G\n\n def get_shortest_paths(self):\n \"\"\"\n Get the shortest paths based on link weights.\n\n Returns:\n Float value of shortest path.\n \"\"\"\n\n return nx.shortest_path_length(self.graph, weight='weight')\n\n @staticmethod\n def get_haversine_distance(lon1, lat1, lon2, lat2) -> float:\n \"\"\"\n Calculate the great circle distance between two points on the earth (specified in decimal degrees).\n \n Args:\n lon1: Longitude for point 1.\n lat1: Latitude for point 1.\n lon2: Longitude for point 2.\n lat2: Latitude for point 2.\n\n Returns:\n Float value of haversine distance.\n \"\"\"\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n\n # 6367 km is the radius of the Earth\n km = 6367 * c\n\n return km\n", "sub_path": "dtk/tools/migration/GeoGraphGenerator.py", "file_name": "GeoGraphGenerator.py", "file_ext": "py", "file_size_in_byte": 3795, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "dtk.tools.migration.GraphGenerator.GraphGenerator", "line_number": 10, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 26, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 38, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 51, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 30, "usage_type": "call"}, {"api_name": "networkx.shortest_path_length", "line_number": 77, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 95, "usage_type": "argument"}, {"api_name": "math.sin", "line_number": 100, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 100, "usage_type": "call"}, {"api_name": "math.asin", "line_number": 101, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "180917525", "text": "from websocket import create_connection\nimport ssl\nimport json\n\nimport requests\nfrom multiprocessing.dummy import Pool\nfrom multiprocessing import Queue\nimport threading\n\n\nclass Boot:\n def __init__(self):\n self.queue = Queue()\n self.pool = Pool(30)\n self.urls = []\n self.network = \"\"\n\n self.network_dict = {\n \"\": \"Darwinia IceFrog Testnet\",\n \"kusama\": \"Kusama CC3\",\n \"darwinia\": \"Darwinia IceFrog Testnet\"\n }\n\n def run(self, network):\n self.network = network\n ws = create_connection('wss://telemetry.polkadot.io/feed/',\n sslopt={\"cert_reqs\": ssl.CERT_NONE,\n \"check_hostname\": False,\n \"ssl_version\": ssl.PROTOCOL_TLSv1})\n\n subscribe_topic = \"subscribe:%s\" % self.network_dict[network]\n ws.send(subscribe_topic)\n boot_nodes = []\n block_height = 0\n for i in range(0, 3):\n j = json.loads(ws.recv())\n if type(j) == list and len(j) > 0 and j[0] != 0:\n for index in range(len(j)):\n if j[index] == 1:\n block_height = j[index + 1][0]\n elif j[index] == 3:\n if j[index + 1][4][0] == block_height:\n 
boot_nodes.append(j[index + 1][0])\n\n ws.close()\n t = threading.Thread()\n t.setDaemon(True)\n t.start()\n\n self.pool.map(self._put_queue, list(boot_nodes))\n while self.queue.empty() is False:\n node_id = self.queue.get()\n self.pool.map_async(self._get_url, [node_id])\n self.pool.close()\n self.pool.join()\n return self.urls\n\n def _put_queue(self, node_id):\n self.queue.put(node_id)\n\n def _get_url(self, node_id):\n j = requests.get(\n url=\"https://telemetry.polkadot.io/network_state/%s/%d\" % (self.network_dict[self.network], node_id),\n timeout=3).json()\n address = list(filter(lambda x:\n x[5:].startswith(\"10\") is False and\n x[5:].startswith(\"172\") is False and\n x[5:].startswith(\"127\") is False,\n j['externalAddresses']))\n if len(address) > 0:\n boot_node = \"{}/p2p/{}\".format(address[0], j['peerId'])\n self.urls.append(boot_node)\n", "sub_path": "config/boot_nodes.py", "file_name": "boot_nodes.py", "file_ext": "py", "file_size_in_byte": 2476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "multiprocessing.Queue", "line_number": 13, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 14, "usage_type": "call"}, {"api_name": "websocket.create_connection", "line_number": 26, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ssl.PROTOCOL_TLSv1", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "564139762", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = [\n url(r'^(?P[0-9]+)/delete', views.delete_view, name='delete'),\n url(r'^(?P[0-9]+)/guardarCorrecta', views.guardarCorrecta, name='guardarCorrecta'),\n url(r'^agregarPreg', views.agregarPreg, name='agregarPreg'),\n url(r'^uploadquestion', views.uploadquestion, name='uploadquestion'),\n url(r'reported/', views.reported, name='reported'),\n url(r'^(?P[0-9]+)/detail', views.detail_view, name='detail'),\n url(r'^(?P[0-9]+)/(?P[0-9]+)/update', views.update_view, name='update'),\n url(r'^(?P[0-9]+)/(?P[0-9]+)/save', views.save_view, name='save'),\n url(r'^(?P[0-9]+)/sacar', views.sacardereported, name='sacar'),\n url(r'^(?P[0-9]+)/guardar_modif', views.guardar_modif, name='guardar_modif'),\n url(r'^(?P[0-9]+)/modificar', views.modificar_view, name='modificar'),\n url(r'^(?P[0-9]+)/(?P[0-9]+)/modifiResp', views.modifiResp, name='modifiResp'),\n url(r'^buscar', views.buscar_view, name='buscar'),\n url(r'^(?P[0-9]+)/(?P[0-9]+)/eliminar', views.eliminar_resp, name='eliminar_resp'),\n url(r'^temas', views.temas, name='temas'),\n]\n", "sub_path": "questions/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "140356100", "text": "\nimport csv\nimport numpy as np\nfrom Bio import SeqIO\nimport re\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.feature_selection import RFE\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\n\nimport time\n#import kameris\n\n###################################################################################################\n################################# READ DATASET ##################################\n# out: matrix [ [seq.id, seq, virus_type], ... 
]\n# fa_file: file with sequences FASTA\n# cls_file: csv with id sequence and virus type\ndef generateLabeledData(fa_file, cls_file): \n data = []\n\n # parse csv\n with open(cls_file) as f:\n reader = dict(csv.reader(f))\n\n # parse the sequences, it used GENERATOR (estos no es util para archivos grandes, no llevan todo a memoria y leen poco a poco en cada invocación similara a los iteradores)\n for record in SeqIO.parse(fa_file, \"fasta\"):\n if record.id in reader:\n # Generate table [Id, Sequences, Class]\n data.append([record.id, record.seq.upper(), reader[record.id]])\n\n return data\n\ndef generateData(fa_file):\n data = []\n \n for record in SeqIO.parse(fa_file, \"fasta\"):\n data.append([record.id, record.seq.upper(), None])\n \n return data\n###################################################################################################\n###################################################################################################\n \n\n###################################################################################################\n################################# GENERATE K-MERS ##################################\n# out: vector with all unique substring (k-mers) from training data\n# data: trainign dataset [ [seq.id, seq, virus_type], ... ]\n# k: k size in k-mers\ndef generate_K_mers(data, k):\n \n\t# List of k-mer\n K_mer = []\n dict = {}\n \n # Initialization of the dictionary\n # we generate unique k-mers for all training data\n for d in data:\n for i in range(0, len(d[1]) - k + 1, 1): \n dict[d[1][i:i + k]] = 0\n \t\t\n \t# Remove patterns not use\n # in FASTA format there is other letter that \n for key in dict.keys():\n if bool(re.match('^[ACGT]+$', str(key))) == True: K_mer.append(str(key))\n return K_mer\n###################################################################################################\n###################################################################################################\n\n\n###################################################################################################\n################################# OCURRENCE OF EACH K-MER ##################################\n# IT IS USED IN PREDICTIONS\ndef generateMatrice(data, K_mer, k):\n\t# Variables\n X = []\n\n # Generate K-mer dictionnary\n X_dict = {}\n for i, e in enumerate(K_mer): \n X_dict[e] = 0\n\n #print('K_mer', K_mer)\n #print('len X_dict', len(X_dict))\n\t\n\t# Generates X (matrix attributes)\n for d in data:\n x = []\n x_dict = X_dict.copy()\n\n # Count K-mer occurences (with overlaping)\n for i in range(0, len(d[1]) - k + 1, 1):\n try: x_dict[d[1][i:i + k]] = x_dict[d[1][i:i + k]] + 1; \n except: pass\n\n # Get only occurences from dictionnary\n for value in x_dict:\n x.append(x_dict.get(value))\n X.append(x)\n\n\t# Return matrices X (matrix attributes)\n return X\n\n# k_mrs: vector with all unique substring (k-mers) from training data\n# data: trainign dataset [ [seq.id, seq, virus_type], ... 
]\n# k: k size in k-mers\n#out: matrix of ocurrences de each k-mer(substring) per sequences (row:seq, col:k-mer), y: labels\ndef generateXYMatrice(data, K_mer, k):\n\t# Variables\n\tX = generateMatrice(data, K_mer, k)\n\ty = []\n\n\t# Generates y (matrix class)\n\tfor i in data:\n\t\ty.append(i[2])\n\t\n\t# Return matrices X and y (matrix attributes and matrix class)\n\treturn X, y\n###################################################################################################\n###################################################################################################\n\n\n###################################################################################################\n################################# MAX MIN NORMALIZATION ##################################\ndef maxMinNormalization(X):\n X_max = max(max(X))\n if X_max > 1:\t\t\n #print(\"Apply linearly scaling each attribute to the range [0, 1]\")\n minMaxScaler = MinMaxScaler(feature_range=(0, 1), copy = False)\n X = minMaxScaler.fit_transform(X)\n #else: print(\"Scaling not required \")\n \n return X\n\n###################################################################################################\n###################################################################################################\n \n \n###################################################################################################\n############################# RECURSIVE FEATURE ELIMINATION ###############################\ndef recursiveFeatureElimination(X, y, k_mers, features_max):\n preliminary_rfe_step = 0.1 #10%\n #preliminary_rfe_step = 1\n clf = SVC(kernel = \"linear\", C = 1) \n \n # original\n \n if len(X[0]) > features_max:\n #print(\"Preliminary - RFE...\")\t\n rfe = RFE(estimator = clf, n_features_to_select = features_max, step = preliminary_rfe_step)\n new_X = rfe.fit_transform(X, y)\n\n # Update list of k_mers\n for i, value in enumerate(rfe.support_):\n if value == False: k_mers[i] = None\n new_k_mers = list(filter(lambda a: a != None, k_mers))\n\n #print(\"reduce features to \", len(new_k_mers))\n\n return new_X, new_k_mers \n\n \n \n else:\n return X, k_mers \n \n\ndef SVD(X, nfeatures = None):\n #print('performing SVD')\n \n _X = np.matrix(X)\n print('_X.shape', _X.shape)\n #print('_X', _X)\n \n if nfeatures == None:\n mayor_zero = np.count_nonzero(_X > 0.0) # count the > 0 entries\n #print('mayor_zero', mayor_zero)\n mayor_zero = mayor_zero/len(X) # mean \n nfeatures = int(mayor_zero*0.1) # take the 10%\n \n print('nfeaturesn SVD', nfeatures)\n \n svd = TruncatedSVD(algorithm='randomized', n_components = nfeatures)\n #svd = TruncatedSVD(n_components = 54)\n X_transf = svd.fit_transform(_X)\n \n\n print('X_transf.shape', X_transf.shape)\n #print('X_transf', X_transf)\n \n return X_transf, nfeatures\n \n \n###################################################################################################\n################################################################################################### \n \n\n\n###################################################################################################\n############################# EVALUATE FEATURE SIZES ###############################\n# X: ocurrence matrix of each k-mer\ndef evaluateFeatureSizes(X, y, k_mers, range_features, features_max, n_splits):\n clf = SVC(kernel = \"linear\", C = 1) \n \n scores = []\n supports = []\n\n for n in range_features:\n print(\"\\rRFE :\", round(n / features_max * 100, 0), \"%\", end='')\n f_measure = 0\n k_mers_rfe = [] # here we store 
the k-mers no eliminated\n rfe = RFE(estimator = clf, n_features_to_select = n, step = 1)\n X_rfe = rfe.fit_transform(X, y)\n\n # Update list of k_mers\n for i, j in enumerate(rfe.support_):\n if j == True: k_mers_rfe.append(k_mers[i])\n \n # Evaluation of attributes with F-measure and Cross-validation\n for train_index, test_index in StratifiedKFold(n_splits = n_splits, shuffle=False, random_state=None).split(X_rfe, y):\n X_train, X_test = list(X_rfe[test_index]), list(X_rfe[train_index])\n y_train, y_test = list(y[test_index]), list(y[train_index])\n\n # Prediction\n\n #start_time = time.clock()\n clf.fit(X_train, y_train)\n #print(time.clock() - start_time, \"seconds\")\n y_pred = clf.predict(X_test)\n \n # Calcul metric scores\n f_measure = f_measure + f1_score(y_test, y_pred, average =\"weighted\")\n\n # Calcul mean F-measure\n mean_f_measure = f_measure / n_splits\n\n # Save scores\n scores.append(mean_f_measure)\n supports.append(k_mers_rfe)\n \n return scores, supports\n \n \n###################################################################################################\n################################################################################################### \n \n\n\n###################################################################################################\n############################# GET OPTIMAL SOLUTION ###############################\ndef getOptimalSolution(scores_list, supports_list, k_mers_range, features_range, T):\n best_score = 0\n\t# Optimal score in relation with treshold\n optimal_score = 0\n \n # Identify best solution\n # here we indetified the best set of k-mers (one element in supports_list) based on the max score (scores_list) \n for i, s in enumerate(scores_list):\n if max(s) > best_score:\n best_score = max(s)\n index = s.index(max(s))\n best_k_length = k_mers_range[i] #here we store the value of k in k-mer\n best_k_mers = supports_list[i][index]\n elif max(s) == best_score:\n if s.index(max(s)) < index:\n best_score = max(s)\n index = s.index(max(s))\n best_k_length = k_mers_range[i]\n best_k_mers = supports_list[i][index]\n else: pass\n\n\t# Identify optimal solution\n # here, we considered all the solutions (scores_list) that at least have the T% of the best score \n # and considered the one that have the fewest features\n for i, l in enumerate(scores_list):\n for j, s in enumerate(l):\n if s >= best_score * T and j <= index: \n optimal_score = s\n index = j\n best_k_length = k_mers_range[i]\n best_k_mers = supports_list[i][index]\n print(\"\\nChange optimal solution\")\n\t\t\t\n if optimal_score == 0: optimal_score = best_score\n\n\t# Save plot results\n fig = plt.figure(figsize=(12, 10))\n for i, s in enumerate(scores_list):\n label = str(k_mers_range[i]) + \"-mers\"\n plt.plot(features_range, s, label= label)\n plt.ylabel(\"F-measure\")\n plt.xlabel(\"Number of features\")\n plt.axvline(index + 1, linestyle=':', color='r')\n title = \"F-measure : \" + str(optimal_score) + \" K-mer size : \" + str(best_k_length) + \" Number of features : \" + str(index + 1)\n plt.title(title)\n plt.legend()\n \n \n fname = str('fig_file')\n plt.savefig(fname)\n\n return best_k_mers, best_k_length\n \n###################################################################################################\n################################################################################################### \n# it return the best k-mers and nfeatures based on the paper Toward an Alignment-Free Method for Feature Extraction\n# and Accurate Classification of 
Viral Sequences\ndef getBestKmersAndFeatures(path, trainingData=None):\n \n \n features_max = 100\n features_min = 1\n n_splits = 5\n k_min = 1\n k_max = 30 \n T = 0.99\n\n range_k_mers = range(k_min, k_max + 1, 1)\n range_features = range(features_min, features_max + 1, 1)\n '''\n features_max = 20\n features_min = 5\n n_splits = 5\n k_min = 5\n k_max = 15 \n T = 0.99\n\n range_k_mers = range(k_min, k_max + 1, 5)\n range_features = range(features_min, features_max + 1, 5)\n '''\n\n scores_list = []\n supports_list = []\n \n if trainingData == None:\n trainingData = generateLabeledData(path + \"/data.fa\", path + \"/class.csv\")\n #trainingData = generateLabeledData(\"../castor_krfe/Data/HIVGRPCG/data.fa\", \"../castor_krfe/Data/HIVGRPCG/class.csv\")\n #data = generateData(\"../castor_krfe/Data/HIVGRPCG/data.fa\")\n \n start_time_t = time.clock()\n\n for k in range_k_mers: \n print(\"\\n\\n Evaluating with k-mer:\", k)\n \n start_time = time.clock()\n k_mers = generate_K_mers(trainingData, k) #list of substring of size k: (if k = 2; k_mers= [AT, CG, AC, ...]) \n t_1 = time.clock() - start_time; print('generate_K_mers took', t_1, \"seconds\", \"feature size \", len(k_mers))\n\n start_time = time.clock()\n X, y = generateXYMatrice(trainingData, k_mers, k) # OCURERNCE MATRIX\n t_2 = time.clock() - start_time; print('generateXYMatrice took', t_2, \"seconds\")\n\n start_time = time.clock()\n X = maxMinNormalization(X)\n t_3 = time.clock() - start_time; print('maxMinNormalization took', t_3, \"seconds\")\n\n # THIS TKE TOO LONG TIME!!!!!, WE START WITH 1K FEATURES\n start_time = time.clock()\n #print(\"feature size \", len(k_mers))\n X, k_mers = recursiveFeatureElimination(X, y, k_mers, features_max)\n t_4 = time.clock() - start_time; print('recursiveFeatureElimination took', t_4, \"seconds\")\n \n \n labelEncodel = LabelEncoder()\n y = labelEncodel.fit_transform(y)\n \n # score = f1-measure, support = list of k-mers\n start_time = time.clock()\n scores, supports = evaluateFeatureSizes(X, y, k_mers, range_features, features_max, n_splits)\n t_5 = time.clock() - start_time; print('evaluateFeatureSizes took', t_5, \"seconds\")\n \n scores_list.append(scores)\n supports_list.append(supports) \n\n\n start_time = time.clock()\n best_k_mers, best_k_length = getOptimalSolution(scores_list, supports_list, range_k_mers, range_features, T)\n t_6 = time.clock() - start_time; print('getOptimalSolution took', t_6, \"seconds\")\n\n t_0 = time.clock() - start_time_t\n\n return best_k_mers, best_k_length, [t_0, t_1, t_2, t_3, t_4, t_5, t_6]\n\n\n\n# The same that getBestKmersAndFeatures, but it is used for comapare.py \ndef getBestKmersAndFeaturesMini(trainingData, k, range_features, T):\n n_splits = 5\n T = 0.99\n\n range_k_mers = range(k, k + 1)\n\n scores_list = []\n supports_list = []\n \n for k in range_k_mers: \n k_mers = generate_K_mers(trainingData, k) #list of substring of size k: (if k = 2; k_mers= [AT, CG, AC, ...]) \n X, y = generateXYMatrice(trainingData, k_mers, k) # OCURERNCE MATRIX \n X = maxMinNormalization(X)\n\n X, k_mers = recursiveFeatureElimination(X, y, k_mers, range_features[len(range_features)-1]) \n \n labelEncodel = LabelEncoder()\n y = labelEncodel.fit_transform(y) \n \n scores, supports = evaluateFeatureSizes(X, y, k_mers, range_features, range_features[len(range_features)-1], n_splits)\n \n scores_list.append(scores)\n supports_list.append(supports) \n\n best_k_mers, best_k_length = getOptimalSolution(scores_list, supports_list, range_k_mers, range_features, T)\n \n return 
best_k_mers, best_k_length\n \n###################################################################################################\n############################# TRAINING ###############################\n\ndef train_model(training_data, best_k_mers):\n\n # Generate matrices\n best_k_length = len(best_k_mers[0])\n X_train, y_train = generateXYMatrice(training_data, best_k_mers, best_k_length)\n\n # Implement and fit classifier\n clf = SVC(kernel = \"linear\", C = 1) \n clf.fit(X_train, y_train)\n \n return clf\n\ndef evaluation(clf, testing_data, best_k_mers):\n\n # Generate matrices\n best_k_length = len(best_k_mers[0])\n X_test, y_test = generateXYMatrice(testing_data, best_k_mers, best_k_length)\n\n\n # Realize prediction\n y_pred = clf.predict(X_test)\n \n # Calcul metric scores\n metrics = precision_recall_fscore_support(y_test, y_pred, average='weighted')\n acc = accuracy_score(y_test, y_pred)\n\n print('metrics: acc, precision, recall, fscore ', acc, metrics)\n return acc, metrics[0], metrics[1], metrics[2]\n \n###################################################################################################\n############################# TRAINING ###############################\n\ndef train_model(training_data, best_k_mers):\n\n # Generate matrices\n best_k_length = len(best_k_mers[0])\n X_train, y_train = generateXYMatrice(training_data, best_k_mers, best_k_length)\n\n # Implement and fit classifier\n clf = SVC(kernel = \"linear\", C = 1) \n clf.fit(X_train, y_train)\n \n return clf\n\ndef evaluation(clf, testing_data, best_k_mers):\n\n # Generate matrices\n best_k_length = len(best_k_mers[0])\n X_test, y_test = generateXYMatrice(testing_data, best_k_mers, best_k_length)\n\n\n # Realize prediction\n y_pred = clf.predict(X_test)\n \n # Calcul metric scores\n metrics = precision_recall_fscore_support(y_test, y_pred, average='weighted')\n acc = accuracy_score(y_test, y_pred)\n\n print('metrics: acc, precision, recall, fscore ', acc, metrics)\n return acc, metrics[0], metrics[1], metrics[2]\n\n\n\n\n###################################################################################################\n################################################################################################### \n\n\n\n\n\n\nif __name__ == \"__main__\" :\n # CMD\n # python3 viral/viral_classification/feature_extractor.py '/home/vicente/projects/BIOINFORMATICS/datasets/VIRAL/PAPILLOMA/HPVSPECG'\n\n\n #training_data = generateLabeledData(\"../Data/HIVGRPCG/data.fa\", \"../Data/HIVGRPCG/class.csv\")\n path = sys.argv[1] # folder path if fasta and csv\n #path = '/home/vicente/projects/BIOINFORMATICS/datasets/VIRAL/PAPILLOMA/HPVSPECG'\n best_k_mers, best_k_length, times = getBestKmersAndFeatures(path)\n print(\"Identified k =\", best_k_length)\n print(\"Identified k-mers =\", best_k_mers)\n\n ", "sub_path": "viral/viral_classification/feature_extractor.py", "file_name": "feature_extractor.py", "file_ext": "py", "file_size_in_byte": 18785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "csv.reader", "line_number": 39, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 42, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 42, "usage_type": "name"}, {"api_name": "Bio.SeqIO.parse", "line_number": 52, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 52, "usage_type": "name"}, {"api_name": "re.match", "line_number": 80, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.MinMaxScaler", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 191, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 198, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.RFE", "line_number": 227, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 235, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "time.clock", "line_number": 355, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 360, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 362, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 364, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 366, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 368, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 370, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 373, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 376, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 379, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 383, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 385, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 391, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 393, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 395, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 418, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 440, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 456, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 457, "usage_type": 
"call"}, {"api_name": "sklearn.svm.SVC", "line_number": 472, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 488, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 489, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 511, "usage_type": "attribute"}]} +{"seq_id": "228536597", "text": "import os\nimport sys\nimport datetime, time\nimport urllib.request\nimport requests\nimport selenium\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nfrom multiprocessing import Pool\nfrom time import localtime, strftime\nfrom datetime import timedelta\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\n\n#Mail\nimport smtplib\nfrom smtplib import SMTP as st\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport critical as ct\n\n#terms\ndate = datetime.date.today()\ndelta = timedelta(days = 1)\nfromtd = date-delta\ntoday = \"20\"+str(date.strftime(\"%y/%m/%d\"))\nyester1 = \"20\"+str(fromtd.strftime(\"%y/%m/%d\"))\narr = []\nfor i in range(2):\n fromtd = fromtd-delta\n arr.append(\"20\"+str(fromtd.strftime(\"%y/%m/%d\")))\n\nyester2 = arr[0]\nyester3 = arr[1]\n\n\nclass Dictionary :\n def __init__(self):\n self.keyword_list = {}\n self.url_list = {}\n self.user_list = {}\n\n def setKeyword(self, company, keyword):\n self.keyword_list[company] = keyword\n\n def setUrl(self, company, keyword):\n self.url_list[company] = keyword\n\n def setUser(self, user, company):\n self.user_list[user] = company\n\n def getKeyword(self):\n return self.keyword_list\n\n def getUrl(self):\n return self.url_list\n\n def getUser(self):\n return self.user_list\n\nclass WebCrawl :\n def __init__(self):\n self.html_txt = ''\n \n def init_driver(self, url):\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('lang=ko_KR')\n options.add_argument('window-size=1400,1000')\n driver = webdriver.Chrome('chromedriver', options = options)\n driver.implicitly_wait(5)\n driver.get(url=url)\n return driver\n\n ###### 국가철도공단 ######\n def crawl_koreaRail(self, driver, key):\n data = []\n options = ['','공고구분','공고번호','공고명','설계금액','공고게시일','진행상태']\n #date 폼 맞추기 \n fromD = yester2.replace('/','-')\n #search key\n searchbox = driver.find_element_by_class_name('search')\n searchbox.clear()\n searchbox.send_keys(key)\n \n #date => clear안됨 수정해야됨\n form = driver.find_element_by_xpath('/html/body/section/section[2]/form/article[1]/ul/li[4]/input[1]')\n form.click()\n form.clear()\n form.send_keys(fromD)\n\n #search\n posting = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CSS_SELECTOR,'.btn_t4')))\n ActionChains(driver).click(posting).perform()\n\n html = driver.page_source\n\n start = html.find('')\n\n html = html[start:end]\n if html.find('자료가 없습니다. 
다른 검색조건을 선택해주세요') != -1:\n return\n else:\n print('[%s]GET DATA...'%key)\n data.append(self.korea_getData())\n self.createHtml(data, options)\n \n ###### get data(국가철도공단) ######\n def korea_getData(self):\n data = []\n txt = ''\n arr = driver.find_elements_by_css_selector('body > section > section.Center > form > article.List_Area > div.Box > div.Grid > table > tbody > tr')\n\n for dt in arr:\n text = dt.text\n if text.find('개찰완료') == -1:\n arr = text.split(' ')\n temp = []\n txt = ''\n for i in range(len(arr)):\n if i<3 or i >= len(arr)-3:\n temp.append(arr[i])\n if i>2 and i')+7\n\n data.append(self.defence_organ())\n print('[%s]GET DATA...'%key)\n \n options = ['순번','업무구분','입찰구분','공고구분', '공고일자','G2B공고번호-차수\\n통합참조번호','판단번호','입찰건명','발주기관','생산능력제출 마감일시\\n입찰참가등록 마감일시\\n입찰서/견적서제출 마감일시','계약방법\\n입찰형태','기초예가']\n self.createCSVFile(data, options, '국방전자조달시스템.csv')\n self.createHtml(data, options)\n\n def createHtml(self, data, options):\n if len(data[0]) <= 0:\n return\n i=0\n src = 'https://ebid.kr.or.kr/open/info/bidDetail.do?icGgBeonho='\n txt = \"\"\"\n \n \"\"\"\n\n for dt in options:\n dt = dt.replace('\\n','
<br>')\n            txt+= '<th>' + dt + '</th>'\n            txt+='</tr>'\n\n            for dt in data:\n                if len(dt) > 0:\n                    for item in dt:\n                        its = item.split('/')\n                        its.pop()\n                        txt+='<tr>'\n                        i=0\n                        icGgBeonho = ''\n                        for it in its:\n                            it = it.replace('\\n','<br>')\n                            if i==2:\n                                icGgBeonho = it\n                            if i==3:\n                                txt += '<td><a href=\"' + src + icGgBeonho + '\">'+ it + '</a></td>'\n                            else:\n                                txt += '<td>' + it + '</td>'\n                            i+=1\n                        txt+='</tr>'\n\n            txt += '</table>
'\n self.html_txt += txt\n \n def defence_organ(self) :\n data = []\n num = 0\n txt = ''\n text = driver.find_elements_by_class_name('sbgrid_datagrid_Output')\n\n if len(text) > 0:\n for dt in text:\n if num % 12 == 0 and num != 0:\n data.append(txt)\n num = 0\n txt = ''\n txt += dt.text+'/'\n num+=1\n return data\n else:\n return []\n \n ###### get data ######\n def getdata(self, url):\n data = []\n txt = ''\n num = 0\n res = requests.get(url)\n\n if res.status_code == 200:\n html = res.text\n soup = bs(html, 'html.parser')\n\n tbody = soup.select_one('tbody')\n tds = tbody.select('td')\n\n for td in tds:\n if num % 10 == 0 and num != 0:\n data.append(txt)\n txt = ''\n num = 0\n t = td.get_text().replace('/','+')\n txt += t+'/'\n num+=1\n else:\n print(\"[\"+res.status_code+\"] FAIL\")\n\n return data\n\n \n ###### check if next page is exist ######\n def check_nextPage(self, path, num):\n try:\n pageNum = int(driver.find_element_by_xpath(path+str(num)+']').text)\n return True\n except:\n return False\n\n ###### create & write csv file (for accumulating) by organizations ######\n def writeFile(self, _list, data, fileName):\n index = []\n index.append(0)\n try:\n csv = pd.DataFrame(_list, index=index)\n if not os.path.exists(fileName):\n csv.to_csv(fileName, encoding='euc-kr', index=False, mode='w')\n print(\"[SUCCESS] WRITE FILE!! (\"+fileName+\")\")\n else:\n csv.to_csv(fileName, encoding='euc-kr', index=False, mode='a', header=False)\n print(\"[SUCCESS] APPEND FILE!! (\"+fileName+\")\")\n\n except:\n print(\"[FAIL] FAIL\")\n self.finishCrawl(driver)\n\n def createCSVFile(self, data, options, fileName):\n _list = {}\n \n for dt in data:\n for strs in dt:\n if(strs!=''):\n s = strs.split('/')\n s.pop()\n if '국토' in fileName:\n url = 'https://ebid.kr.or.kr/open/info/bidDetail.do?icGgBeonho='+s[1]+'&todoPopup=Y'\n s.append(url)\n for idx in range(len(options)):\n if '+' in s[idx]:\n s[idx] = s[idx].replace('+','/')\n _list[options[idx]] = s[idx]\n self.writeFile(_list, data, fileName)\n\n ###### make html texts to send client ######\n def writeHtml(self, url):\n req = requests.get(url)\n html = req.text\n\n start = html.find('')+7\n\n html_table = html[start:end]\n self.html_txt += html_table\n\n ###### dispose driver ######\n def disposeDriver(self,driver):\n driver.quit()\n\n ###### return html text ######\n def gethtmlText(self):\n return self.html_txt\n\nclass Mail:\n\n def __init__(self):\n self.from_ = ct.get_myAddress()\n self.pwd_ = ct.get_myPwd()\n\n def sendMail(self, txt):\n html_txt = \"\"\"\"\"\" + txt + '/body>'\n html_txt = self.correctTxt(txt)\n to_ = get_userAddress()\n msg = MIMEMultipart('alternative')\n msg['Subject'] = '나라장터'\n msg['From'] = self.from_\n msg['To'] = to_\n\n part = MIMEText(html_txt, 'html')\n\n msg.attach(part)\n mail = smtplib.SMTP('smtp.gmail.com', 587)\n mail.ehlo()\n mail.starttls()\n mail.login(self.from_, self.pwd_)\n mail.sendmail(self.from_, to_, msg.as_string())\n mail.quit()\n print('\\n 메일을 보냈습니다.')\n\n def correctTxt(self, txt):\n txt = txt.replace('')+9\n str1 = txt[start:to]\n txt = txt.replace(str1, '')\n return txt\n \ndef fileOpen(file):\n dic = Dictionary()\n\n try:\n f=open(file, \"r\")\n except:\n sys.stderr.write(\"no file:%s\\n\"%file)\n\n while True:\n line = f.readline()\n if not line:\n f.close()\n break\n\n key = line.split(\"~\")[0]\n val = line.split(\"~\")[1].rstrip('\\n')\n\n if(file.find('keyword')!=-1):\n dic.setKeyword(key, val)\n elif(file.find('user')!=-1):\n dic.setUser(key, val)\n else:\n dic.setUrl(key, val)\n \n 
return dic\n\nif __name__ == '__main__':\n \n #setdata\n keyword = fileOpen(\"keyword.txt\")\n url = fileOpen(\"url.txt\")\n user = fileOpen(\"user.txt\")\n\n #crawl\n keyList = keyword.getKeyword()\n urlList = url.getUrl()\n result = ''\n for key,value in keyList.items():\n \n if(key == '서울교통공사'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['서울교통공사'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_by_region(driver, idx, '서울교통공사')\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>서울교통공사</h2><br><br>
'\n result += crawl.gethtmlText()\n\n \n if(key == '국가철도공단'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['국가철도공단'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_koreaRail(driver, idx)\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>국가철도공단</h2><br><br>
'\n result += crawl.gethtmlText()\n \n if(key == '나라장터(안산시)'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['나라장터(안산시)'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_by_region(driver, idx, '안산시')\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>안산시</h2><br><br>
'\n result += crawl.gethtmlText()\n\n if(key == '나라장터'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['나라장터'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_by_region(driver, idx, 'nara')\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>나라장터</h2><br><br>
'\n result += crawl.gethtmlText()\n\n if(key == '부천시청'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['부천시청'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_by_region(driver, idx, '부천')\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>부천시청</h2><br><br>
'\n result += crawl.gethtmlText()\n \n if(key == '국방전자조달시스템'):\n print(\"\\n--------------------------------------------\")\n print('\\t'+key+\" CRAWLING...\")\n crawl = WebCrawl()\n driver = crawl.init_driver(urlList['국방전자조달시스템'])\n x = value.split(',')\n for idx in x:\n crawl.crawl_defence_organ(driver, idx)\n crawl.disposeDriver(driver)\n result += '
<br><br><h2>국방전자조달시스템</h2><br><br>
'\n result += crawl.gethtmlText()\n \n #메일 보내\n mail = Mail()\n mail.sendMail(result)\n \n \n", "sub_path": "web crawling _ python/crawling.py", "file_name": "crawling.py", "file_ext": "py", "file_size_in_byte": 17630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datetime.date.today", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 29, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 30, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 76, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 99, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 99, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 99, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 100, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 147, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 194, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 293, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 297, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path", "line_number": 330, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 360, "usage_type": "call"}, {"api_name": "critical.get_myAddress", "line_number": 380, "usage_type": "call"}, {"api_name": "critical.get_myPwd", "line_number": 381, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 387, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 392, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 395, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 420, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 420, "usage_type": "attribute"}]} +{"seq_id": "310820690", "text": "#!/bin/env python3\n\nimport sys\nfrom collections import defaultdict\nfrom adj_list_undirected_graph import *\n\n\ndef buildGraph(wordList):\n g = Graph()\n buckets = defaultdict(list)\n for word in wordList:\n for i in range(len(word)):\n bucket = word[:i] + '_' + word[i+1:]\n buckets[bucket].append(word)\n\n for bucket in buckets.keys():\n for word1 in buckets[bucket]:\n for word2 in buckets[bucket]:\n if word1 != word2:\n g.addEdge(word1, word2)\n\n #print(buckets.keys())\n return g\n\ndef read_data():\n # f = open(sys.argv[1], 'r')\n f = open('data/wordlists_small.txt', 'r')\n wordList = f.read().splitlines()\n return wordList\n \n\nif __name__ == '__main__':\n wordList = read_data()\n G = buildGraph(wordList)\n print(G)\n", "sub_path": "algorithms_DS/COURSES/pythonds/chapter_7_graphs/ladder_prob_undirected.py", "file_name": 
"ladder_prob_undirected.py", "file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.defaultdict", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "115821543", "text": "import numpy as np\n\ndata=np.loadtxt('populations.txt')\n#print(data)\n\nyear, hare, lynx, carrot=data.T\n\nfrom matplotlib import pyplot as plt\nplt.axes([0.2, 0.1, 0.5, 0.8])\nplt.plot(year, hare, year, lynx, year, carrot)\nplt.legend(('hare','lynx', 'carrot'), loc=(1.05,0.5))\nplt.show()\n\n", "sub_path": "dataStatistics1.py", "file_name": "dataStatistics1.py", "file_ext": "py", "file_size_in_byte": 283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.loadtxt", "line_number": 3, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "90103549", "text": "#!/usr/bin/env python3\n# --------------------( LICENSE )--------------------\n# Copyright (c) 2014-2021 Beartype authors.\n# See \"LICENSE\" for further details.\n\n'''\n**Callable argument iterator utility unit tests.**\n\nThis submodule unit tests the public API of the private\n:mod:`beartype._util.utilfunc.arg.utilfuncargiter` submodule.\n'''\n\n# ....................{ IMPORTS }....................\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n# WARNING: To raise human-readable test errors, avoid importing from\n# package-specific submodules at module scope.\n#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n# ....................{ TESTS ~ iterator }....................\ndef test_iter_func_args() -> None:\n '''\n Test the\n :func:`beartype._util.func.arg.utilfuncargtest.iter_func_args` generator.\n '''\n\n # Defer heavyweight imports.\n from beartype.roar._roarexc import _BeartypeUtilCallableException\n from beartype._util.func.arg.utilfuncargiter import (\n ParameterKind,\n ParameterMandatory,\n iter_func_args,\n )\n from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_8\n from beartype_test.a00_unit.data.func.data_func import (\n func_args_0,\n func_args_1_flex_mandatory,\n func_args_1_varpos,\n func_args_1_kwonly_mandatory,\n func_args_2_flex_mandatory,\n func_args_5_flex_mandatory_varpos_varkw,\n )\n from pytest import raises\n\n # Assert this iterator returns the empty generator for an argument-less\n # callable, explicitly coerced into a tuple to trivialize testing.\n assert len(tuple(iter_func_args(func_args_0))) == 0\n\n # Assert this iterator returns the expected generator for argumentative\n # callables accepting multiple kinds of parameters, explicitly coerced into\n # tuples to trivialize testing.\n assert _iter_func_args_tuple(func_args_1_flex_mandatory) == (\n ('had_one_fair_daughter', ParameterKind.POSITIONAL_OR_KEYWORD, ParameterMandatory,),\n )\n assert 
_iter_func_args_tuple(func_args_1_varpos) == (\n ('and_in_her_his_one_delight', ParameterKind.VAR_POSITIONAL, ParameterMandatory,),\n )\n assert _iter_func_args_tuple(func_args_1_kwonly_mandatory) == (\n ('when_can_I_take_you_from_this_place', ParameterKind.KEYWORD_ONLY, ParameterMandatory,),\n )\n assert _iter_func_args_tuple(func_args_2_flex_mandatory) == (\n ('thick_with_wet_woods', ParameterKind.POSITIONAL_OR_KEYWORD, ParameterMandatory,),\n ('and_many_a_beast_therein', ParameterKind.POSITIONAL_OR_KEYWORD, ParameterMandatory,),\n )\n assert _iter_func_args_tuple(func_args_5_flex_mandatory_varpos_varkw) == (\n ('we_are_selfish_men', ParameterKind.POSITIONAL_OR_KEYWORD, ParameterMandatory,),\n ('oh_raise_us_up', ParameterKind.POSITIONAL_OR_KEYWORD, ParameterMandatory,),\n ('and_give_us', ParameterKind.VAR_POSITIONAL, ParameterMandatory,),\n ('return_to_us_again', ParameterKind.KEYWORD_ONLY, 'Of inward happiness.',),\n ('manners_virtue_freedom_power', ParameterKind.VAR_KEYWORD, ParameterMandatory,),\n )\n\n # If the active Python interpreter targets Python >= 3.8 and thus supports\n # PEP 570-compliant positional-only parameters...\n if IS_PYTHON_AT_LEAST_3_8:\n # Defer version-specific imports.\n from beartype_test.a00_unit.data.func.data_pep570 import (\n func_args_10_all_except_flex_mandatory)\n\n # Assert this iterator returns the expected generator for argumentative\n # callables accepting multiple kinds of parameters -- including\n # positional-only parameters.\n assert _iter_func_args_tuple(func_args_10_all_except_flex_mandatory) == (\n ('in_solitude_i_wander', ParameterKind.POSITIONAL_ONLY, ParameterMandatory,),\n ('through_the_vast_enchanted_forest', ParameterKind.POSITIONAL_ONLY, ParameterMandatory,),\n ('the_surrounding_skies', ParameterKind.POSITIONAL_ONLY, 'are one',),\n ('torn_apart_by', ParameterKind.POSITIONAL_OR_KEYWORD, 'the phenomenon of lightning',),\n ('rain_is_pouring_down', ParameterKind.POSITIONAL_OR_KEYWORD, 'my now shivering shoulders',),\n ('in_the_rain_my_tears_are_forever_lost', ParameterKind.VAR_POSITIONAL, ParameterMandatory,),\n ('the_darkened_oaks_are_my_only_shelter', ParameterKind.KEYWORD_ONLY, ParameterMandatory,),\n ('red_leaves_are_blown_by', ParameterKind.KEYWORD_ONLY, 'the wind',),\n ('an_ebony_raven_now_catches', ParameterKind.KEYWORD_ONLY, 'my eye.',),\n ('sitting_in_calmness', ParameterKind.VAR_KEYWORD, ParameterMandatory,),\n )\n\n # Assert this iterator returns a generator raising the expected exception\n # when passed a C-based callable.\n with raises(_BeartypeUtilCallableException):\n next(iter_func_args(iter))\n\n# ....................{ PRIVATE ~ coercer }....................\ndef _iter_func_args_tuple(func) -> tuple:\n '''\n Trivial-to-test **parameter metadata tuple** (i.e., tuple of one 3-tuple\n ``({arg_name}, {arg_kind}, {arg_default_value_or_mandatory})`` describing\n each parameter accepted by the passed callable) internally coerced from the\n non-trivial-to-test **parameter metadata generator** (i.e., generator\n implicitly created and returned by the\n :func:`beartype._util.func.arg.utilfuncargiter.iter_func_args` generator\n callable) for the passed pure-Python callable.\n\n Parameters\n ----------\n func : Callable\n Pure-Python callable to be inspected.\n\n Returns\n ----------\n Tuple[Tuple[str, EnumMemberType, Any], ...]\n Variable-length tuple nesting one 3-tuple ``(arg_name, arg_kind,\n arg_default)`` for each parameter accepted by this callable.\n '''\n\n # Defer heavyweight imports.\n from 
beartype._util.func.arg.utilfuncargiter import iter_func_args\n from collections.abc import Generator\n\n # Parameter metadata generator created by this iterator for that callable.\n param_meta_generator = iter_func_args(func)\n\n # Assert this generator actually is a generator.\n assert isinstance(param_meta_generator, Generator)\n\n # Return a tuple comprehension of...\n return tuple(\n # 3-tuples describing each parameter accepted by an arbitrary callable\n # passed to this generator...\n (\n param_meta.name,\n param_meta.kind,\n param_meta.default_value_or_mandatory,\n )\n # For each parameter metadata object yielded by this generator.\n for param_meta in param_meta_generator\n )\n", "sub_path": "beartype_test/a00_unit/a00_util/func/arg/test_utilfuncargiter.py", "file_name": "test_utilfuncargiter.py", "file_ext": "py", "file_size_in_byte": 6724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "beartype._util.func.arg.utilfuncargiter.iter_func_args", "line_number": 46, "usage_type": "call"}, {"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_0", "line_number": 46, "usage_type": "argument"}, {"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_1_flex_mandatory", "line_number": 51, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 52, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 52, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 52, "usage_type": "name"}, {"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_1_varpos", "line_number": 54, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.VAR_POSITIONAL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 55, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 55, "usage_type": "name"}, {"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_1_kwonly_mandatory", "line_number": 57, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.KEYWORD_ONLY", "line_number": 58, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 58, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 58, "usage_type": "name"}, {"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_2_flex_mandatory", "line_number": 60, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 61, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 61, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 61, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 62, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 62, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 62, "usage_type": "name"}, 
{"api_name": "beartype_test.a00_unit.data.func.data_func.func_args_5_flex_mandatory_varpos_varkw", "line_number": 64, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 65, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 65, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 65, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 66, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 66, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 66, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.VAR_POSITIONAL", "line_number": 67, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 67, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 67, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.KEYWORD_ONLY", "line_number": 68, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 68, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.VAR_KEYWORD", "line_number": 69, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 69, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 69, "usage_type": "name"}, {"api_name": "beartype._util.py.utilpyversion.IS_PYTHON_AT_LEAST_3_8", "line_number": 74, "usage_type": "name"}, {"api_name": "beartype_test.a00_unit.data.func.data_pep570.func_args_10_all_except_flex_mandatory", "line_number": 82, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_ONLY", "line_number": 83, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 83, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 83, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_ONLY", "line_number": 84, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 84, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 84, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_ONLY", "line_number": 85, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 85, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 86, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 86, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.POSITIONAL_OR_KEYWORD", "line_number": 87, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 87, 
"usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.VAR_POSITIONAL", "line_number": 88, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 88, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 88, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.KEYWORD_ONLY", "line_number": 89, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 89, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 89, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.KEYWORD_ONLY", "line_number": 90, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 90, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.KEYWORD_ONLY", "line_number": 91, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 91, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind.VAR_KEYWORD", "line_number": 92, "usage_type": "attribute"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterKind", "line_number": 92, "usage_type": "name"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.ParameterMandatory", "line_number": 92, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 97, "usage_type": "call"}, {"api_name": "beartype.roar._roarexc._BeartypeUtilCallableException", "line_number": 97, "usage_type": "argument"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.iter_func_args", "line_number": 98, "usage_type": "call"}, {"api_name": "beartype._util.func.arg.utilfuncargiter.iter_func_args", "line_number": 128, "usage_type": "call"}, {"api_name": "collections.abc.Generator", "line_number": 131, "usage_type": "argument"}]} +{"seq_id": "402738998", "text": "#!/usr/bin/python\n\nimport os\nimport argparse\nimport glob\n\n####################\n# Informations #\n####################\n \ncourriel = 'francois-xavier.babin@outlook.fr'\t\t\t\t\t\t\t\t\t\t\t\t\t\n\n#creation of a parser which gather the arguments of the program \nparser=argparse.ArgumentParser(\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tdescription=''' program to generate fastqc qualities corresponding to fastq files in the specified directory''',\n\tepilog=\"\"\"The input files must be in 'fastq' format.\n\tcontact: \"\"\"+courriel+\"\"\"\\n\\n\"\"\")\n\nparser.add_argument('-i', \ttype=str, \t\tdefault=\"../raw_files/pepper/\", \t\t\t\t\t\thelp='input directory (default: %(default)s)')\t\t\t\t\nparser.add_argument('-o', \ttype=str, \t\tdefault=\"../raw_files/pepper/qualities\", \t\t\t\thelp='output directory (default: %(default)s)')\t\t\t\nparser.add_argument('-t', \ttype=int, \t\tdefault=1, \t\t\t\t\t\t\t\t\t\t\t\thelp='threads (beware do not use too many threads if your computer is not able to !!!) 
(default: %(default)s)')\t\t\t\t\t\nparser.add_argument('-pr', \ttype=str, \t\tdefault=\"PM\", \t\t\t\t\t\t\t\t\t\t\thelp='prefix (default: %(default)s)')\t\t\t\t\t\t\nparser.add_argument('-f', \ttype=str, \t\tdefault=\"fastqc\", \t\t\t\t\t\t\t\t\t\thelp='fastqc location (default: %(default)s)')\nparser.add_argument('-c', \ttype=str, \t\tdefault=\"../Tools/contaminants_illumina_enrichi.fa\", \thelp='contaminants file (default: %(default)s)')\n\nargs=parser.parse_args()\n\n################\n# Function #\n################\n\n#Function try except\ndef try_except(message,command):\n\ttry:\n\t\tprint(message+\"\\n\")\n\t\tcommand = os.system(command)\n\n\t\tif command == 0:\n\t\t\tprint(message+\" done ...\\n\")\n\t\telse:\n\t\t\tprint(message+\" failed ...\\n\")\n\texcept OSError as e:\n\t\tprint(\"Execution failed \", e)\n\n#####################\n# Verifications #\n#####################\n\n#if the input directory doesn't exists it will create it\nif not os.path.exists(args.i):\n\ttry_except(\"the \"+args.i+\" doesn\\'t exists ... creating an empty directory\", \"mkdir -p \"+args.i)\n\n#if the qualities directory doesn't exists it will create it\nif not os.path.exists(args.o):\n\ttry_except(\"the \"+args.o+\" doesn\\'t exists ... creating an empty directory\",\"mkdir -p \"+args.o)\n\nif not os.path.isfile(args.c) :\n\tprint('the contaminant file ('+args.c+') doest not exist !')\n\t\t\n###################\n# Instructions #\n###################\n\n#fastqc qualities generation\ntry_except(\"Generating fastqc qualities ...\",\"parallel --gnu -j\"+str(args.t)+\" '\"+args.f+\" -o \"+args.o+\" --nogroup -c \"+args.c+\"' ::: \"+args.i+args.pr+\"*.fastq\")\n", "sub_path": "scripts/fastqc_quality.py", "file_name": "fastqc_quality.py", "file_ext": "py", "file_size_in_byte": 2449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}]} +{"seq_id": "155322249", "text": "import urllib.request\nimport requests\nimport json\nfrom re import findall\nfrom bs4 import BeautifulSoup\nfrom webapp.extensions import mongo\n\n\ndef get_html(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:65.0) Gecko/20100101 Firefox/65.0'\n }\n try:\n result = requests.get(url, headers=headers)\n result.raise_for_status()\n return result.text\n except(requests.RequestException, ValueError):\n print(\"Сетевая ошибка\")\n return False\n\n\ndef save_url(url):\n url_str = str(url)\n mydict = {\"url\": url_str}\n site_url_collection = mongo.db.site_url\n site_url_collection.insert_one(mydict)\n\n\ndef get_url():\n page_list = [i for i in range(1, 2)]\n for page in page_list:\n url_str = \"https://letsearch.ru/search?text=host%3Aby&s=0&p=\" + str(page)\n html = get_html(url_str)\n if html:\n soup = BeautifulSoup(html, 'html.parser')\n all_url = soup.find('table', class_='search-result__').findAll('td', class_='ico')\n print(all_url)\n for site_url in all_url:\n url = 
site_url.find('a').text\n save_url(url)\n\n\ndef get_meta(name):\n url_str = \"http://\" + name\n check = requests.get(url_str) # проверка http https\n html = get_html(check.url)\n if html:\n soup = BeautifulSoup(html, 'html.parser')\n title = soup.find('title').text\n description = soup.find('meta', {\"name\": \"description\"})['content']\n url = check.url\n # keywords = soup.find('meta', {\"name\":\"keywords\"})['content']\n result = {\n \"title\": title,\n \"description\": description,\n \"url\": url,\n # \"keywords\": keywords\n }\n return result\n\n\ndef get_data(name):\n url_str = \"http://api.whois.vu/?q=\" + name\n\n with urllib.request.urlopen(url_str) as url:\n data = json.loads(url.read().decode())\n\n meta = get_meta(name)\n title = meta[\"title\"]\n description = meta[\"description\"] \n url = meta[\"url\"]\n # keywords = meta[\"keywords\"]\n\n domain = data[\"domain\"]\n domain_type = data[\"type\"]\n registrar = data[\"registrar\"]\n\n text = data[\"whois\"]\n dates = get_dates(text)\n creation_date = dates[\"creation_date\"]\n expiration_date = dates[\"expiration_date\"]\n\n site_collection = mongo.db.site_data\n site = {\n \"title\": title,\n \"description\": description,\n \"url\": url,\n # \"keywords\": keywords,\n \"domain\": domain,\n \"domain_type\": domain_type,\n \"registrar\": registrar,\n \"creation_date\": creation_date,\n \"expiration_date\": expiration_date\n }\n site_collection.insert_one(site)\n\n\ndef get_dates(text):\n # Creation Date - дата создания домена\n # Expiration Date/free-date - дата окончания действия домена\n # Updated Date/Last updated - последняя дата обновления\n cd_pattern = r\"Creation Date: \\d{4}-\\d\\d-\\d\\d|created: \\d{4}-\\d\\d-\\d\\d\"\n ed_pattern = r\"Expiration Date: \\d{4}-\\d\\d-\\d\\d|paid-till: \\d{4}-\\d\\d-\\d\\d\"\n date_pattern = r\"\\d{4}-\\d\\d-\\d\\d\"\n creation_date = findall(date_pattern, findall(cd_pattern, text)[0])\n expiration_date = findall(date_pattern, findall(ed_pattern, text)[0])\n result = {\n \"creation_date\": creation_date[0],\n \"expiration_date\": expiration_date[0]\n }\n return result\n\n\ndef get_data_all():\n site_list_url = mongo.db.site_url\n result = site_list_url.find({})\n for x in result:\n get_data(x['url'])\n\n\n# def fav_col(domain, domain_type, registrar, creation_date, expiration_date):\n# mydict = {\"domain\": domain,\n# \"domain_type\": domain_type,\n# \"registrar\": registrar,\n# \"creation_date\": creation_date,\n# \"expiration_date\": expiration_date}\n# site_collection.insert_one(mydict)\n", "sub_path": "webapp/site/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 3968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.RequestException", "line_number": 17, "usage_type": "attribute"}, {"api_name": "webapp.extensions.mongo.db", "line_number": 25, "usage_type": "attribute"}, {"api_name": "webapp.extensions.mongo", "line_number": 25, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 65, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 65, "usage_type": 
"name"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "webapp.extensions.mongo.db", "line_number": 83, "usage_type": "attribute"}, {"api_name": "webapp.extensions.mongo", "line_number": 83, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 105, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 106, "usage_type": "call"}, {"api_name": "webapp.extensions.mongo.db", "line_number": 115, "usage_type": "attribute"}, {"api_name": "webapp.extensions.mongo", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "366613462", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('50_Startups.csv')\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\ndummy = pd.get_dummies(pd.Series(x[:, 3]))\ndata = pd.DataFrame({'R&D Spent':x[:,0],'Administration':x[:,1], 'Marketing Spent':x[:,2]})\ndata = pd.concat([dummy, data], axis=1)\nx = data.iloc[:, :].values\nx = x.astype(float) \n\n# Dummy Variable trap:\nx = x[:, 1:]\n\nimport statsmodels.formula.api as sm\nx = np.append(np.ones((50, 1)).astype(int), x, axis = 1)\ndef backwardElimination(x, sl):\n numVars = len(x[0])\n for i in range(0, numVars):\n regressor_OLS = sm.OLS(y, x).fit()\n maxVar = max(regressor_OLS.pvalues).astype(float)\n if maxVar > sl:\n for j in range(0, numVars - i):\n if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n x = np.delete(x, j, axis = 1)\n regressor_OLS.summary()\n return x\n\nSL = 0.05\nx_opt = backwardElimination(x, SL)\n", "sub_path": "Part 2 - Regression/2. Multiple Linear Regression/auto_implement_backward_elimination.py", "file_name": "auto_implement_backward_elimination.py", "file_ext": "py", "file_size_in_byte": 1035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.OLS", "line_number": 26, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.delete", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "228539367", "text": "# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport pystan\nimport pandas as pd\nimport scipy.special\nimport scipy.optimize\nimport pickle\nimport statsmodels.tools.numdiff as smnd\nfrom .viz import bokeh_traceplot\nfrom .stats import compute_hpd\n\n\nclass StanModel(object):\n R\"\"\"\n Custom StanModel class for crafting and sampling from Stan\n models.\n \"\"\"\n def __init__(self, file, data_dict=None, samples=None, force_compile=False):\n \"\"\"\n Parameters\n ----------\n model: str\n Relative path to saved Stan model code. To deter bad habits,\n this class does not accept a string as the model code. Save \n your Stan models. \n data_dict: dictionary\n Dictonary of all data block parameters for the model.\n force_compile: bool\n If True, model will be forced to compile. 
If False, \n a precompiled file will be loaded if present. \n \"\"\"\n if '.pkl' in file:\n s = _load(file)\n self.model = s[0]\n self.samples = s[1]\n else:\n self.model = loadStanModel(file, force=force_compile)\n self.data = data_dict\n self.samples = samples\n self.df = None\n \n def sample(self, data_dict=None, iter=2000, chains=4, return_df=True, **kwargs):\n \"\"\"\n Samples the assembled model given the supplied data dictionary\n and returns output as a dataframe.\n \"\"\"\n if data_dict == None:\n data_dict = self.data\n self.chains = chains\n self.iter = iter\n print('Beginning sampling...')\n self.samples = self.model.sampling(data_dict, \n chains=chains, iter=iter, **kwargs)\n print('finished sampling!')\n if return_df:\n self.df = self.samples.to_dataframe(diagnostics=True)\n return [self.samples, self.df]\n else:\n return self.samples\n \n # Pickling objects\n def dump(fname):\n \"\"\"Saves StanFit4Model object and sampling summary as a pickled dictionary.\"\"\"\n with open(f\"{fname.split('.')[0]}.pkl\", 'wb') as _file:\n pickle.dump({'model' : self.model, 'fit' : self.samples}, _file, protocol=-1)\n \n def _load(fname):\n with open(file, 'rb') as _file:\n fit_dict = pickle.load(_file)\n self.model = fit_dict[0]\n self.samples = fit_dict[1]\n return [self.model, self.samples]\n \n # Diagnostics\n def check_divergence(self, thresh=0.0005, return_values=True, quiet=False):\n \"\"\"\n Computes the fraction of diverging samples\n \"\"\"\n if self.samples == None:\n raise RuntimeError('Divergence is not defined without sampling. Please sample your model first')\n if self.df == None:\n self.df = self.samples.to_datframe(diagnostics=True)\n\n n_div = np.sum(self.df['divergent__']) \n div_frac = n_div / len(self.df) * 100\n if div_frac == 0:\n statement = \"No diverging samples found. Nicely done.\"\n if div_frac < thresh:\n statement = \"Diverging samples below {} % ({} of {} samples diverging).\".format(thresh * 100, n_div, len(self.df)) \n else:\n statement = \"Warning, {} % of samples are diverging. Reparameterize your model or adjust adapt_delta above 0.8.\".format(div_frac)\n \n if quiet is not False:\n print(statement)\n if return_values:\n return {'statement':statement, 'n_diverging':n_div, 'n_samples': len(self.df), 'diverging_fraction':div_frac}\n else:\n return statement\n\n def check_rhat(self, return_values=True, quiet=False):\n \"\"\"\n Determines the Gelman-Rubin statistic (R-hat). If 0.9 < r-hat < 1.1, the sampler has converged. \n \"\"\"\n if self.samples == None:\n raise RuntimeError('R-hat not defined without sampling. Please sample your model first.')\n if self.df == None:\n self.df = self.samples.to_dataframe(diagnostics=True)\n raise RuntimeError('Not yet implemented!')\n\n def check_n_effective(self, thresh=0.001, return_values=True, quiet=False):\n if self.samples == None: \n raise RuntimeError('n_effective / N not defined without sampling. Please sample your model first.')\n if self.df == None:\n self.df = self.samples.to_dataframe(diagnostics=True)\n raise RuntimeError('Not yet implemented!')\n\n def check_diagnostics(self, save_summary=False, fname=None, return_values=True, quiet=False):\n \"\"\"\n Checks all sampling diagnostics. \n\n Parameters\n ----------\n save_summary: bool\n If True, a summary file will be saved. fname is required. \n fname: str\n Desired filename of summary file. 
Only required if save_summary is True.\n return_values: bool\n If True, a dictionary of diagnostics is returned.\n quiet: bool\n If True, summary will not be printed to screen. Default is False.\n \"\"\"\n raise RuntimeError('Not yet implemented!')\n \n def summarize_parameters(self, parnames=[], mass_frac=0.95):\n \"\"\"\n Summarizes all or a subset of parameters from a Stan model. \n \n Parameters\n ----------\n parnames: list\n List of desired parnames. If left empty, all parameters \n are summarized and returned. \n mass_frac: float [0, 1]\n The probability mass fraction for the HPD. Default is \n the 95% credible region. \n \n Returns\n -------\n summary_df: pandas DataFrame\n Dataframe of summarized parameters. The columns are as\n follows:\n parameter = name of parameter in Stan model\n dimension = index (dimension) of the parameter\n mean = mean of samples\n median = median of samples\n mode = parameter value when the log posterior is maximized\n hpd_min = minimum bound of the highest probability density\n defined by the mass fraction.\n hpd_max = upper bound of the highest probability density\n defined by the mass fraction\n \"\"\"\n # Extract the sampling information and find the mode\n samples = self.samples\n fit = samples.extract()\n mode_ind = np.argmax(fit['lp__'])\n \n # Get a list of all parameters defined in the model and assign a dimension\n pars = samples.model_pars\n \n # Convert the dimensions for each parameter to integers. \n _dims = []\n for d in samples.par_dims:\n if len(d) == 0:\n _dims.append(1)\n else:\n _dims.append(int(d[0]))\n \n par_dims = {p:v for p, v in zip(pars, _dims)}\n if len(parnames) != 0:\n pars = parnames\n desired_pars = {k:v for k, v in par_dims.items() if k in parnames}\n par_dims = desired_pars\n \n # Iterate through each parameter and compute the aggregate properties. \n df = pd.DataFrame([], columns=['parameter', 'dimension', 'mean'\n 'mode', 'median', 'hpd_min',\n 'hpd_max', 'mass_fraction']) \n for par, dim in par_dims.items():\n par_samples = fit[par]\n if dim == 1:\n par_samples = par_samples[:, np.newaxis]\n for j in range(dim):\n # Compute the summary statistics\n par_mode = par_samples[:, j][mode_ind]\n par_mean = np.mean(par_samples[:, j])\n par_median = np.median(par_samples[:, j])\n hpd_min, hpd_max = compute_hpd(par_samples[:, j], mass_frac=mass_frac)\n \n # Assemble a dictionary to append to the data frame\n par_dict ={'parameter':par,\n 'dimension': j + 1,\n 'mean': par_mean,\n 'mode': par_mode,\n 'median': par_median,\n 'hpd_min': hpd_min,\n 'hpd_max': hpd_max,\n 'mass_fraction': mass_frac}\n df = df.append(par_dict, ignore_index=True)\n df['dimension'] = df['dimension'].astype(int) \n return df \n\n \n # Vizualization \n def traceplot(self, varnames=None):\n \"\"\"\n Shows the sampling trace and distributions for desired varnames\n See documentation for mwc.viz.bokeh_traceplot for more details.\n \"\"\"\n return bokeh_traceplot(self.samples, varnames=varnames)\n\n \n\n \n \n\ndef loadStanModel(fname, force=False):\n \"\"\"Loads a precompiled Stan model. If no compiled model is found, one will be saved.\"\"\"\n # Identify the model name and directory structure\n rel, sm_dir = fname.split('/stan/')\n sm_name = sm_dir.split('.stan')[0]\n pkl_name = f'{rel}/stan/{sm_name}.pkl' \n # Check if the model is precompiled\n if (os.path.exists(pkl_name)==True) and (force != True):\n print('Found precompiled model. Loading...')\n model = pickle.load(open(pkl_name, 'rb'))\n print('finished!')\n else:\n print('Precompiled model not found. 
Compiling model...')\n model = pystan.StanModel(fname)\n print('finished!')\n with open(pkl_name, 'wb') as f:\n pickle.dump(model, f) \n return model\n \n\ndef deterministic_log_posterior(alpha, I_1, I_2, p=0.5, neg=False):\n \"\"\"\n Computes the log posterior of the deterministic model for the calibration\n factor.\n\n Parameters\n ----------\n alpha : float\n The calibration factor in units of a.u. per molecule. This must be\n positive\n I_1, I_2 : 1d-arrays or Pandas Series.\n The intensity of the two sister cells in units of a.u. per cell.\n Negative values will raise a ValueError.\n p: float between 0 and 1\n The partitioning probability into one cell or the other. Default value\n is fair partitioning, 0.5.\n neg : bool\n If True, the negative log posterior is returned. Default is False\n\n Returns\n -------\n logp : float\n Value of the log posterior with the provided parameter values.\n \"\"\"\n # Determine the prefactor.\n if neg is True:\n prefactor = -1\n else:\n prefactor = 1\n\n # Ensure alpha is positive. If not, return\n if alpha < 0:\n return prefactor * -np.inf\n\n # Ensure that the two intensities are positive.\n if (I_1 < 0).any() or (I_2 < 0).any():\n raise ValueError('I_1 or I_2 contains negative values. Fix that plz.')\n\n # Make sure value for p is sensical.\n if (p < 0) | (p > 1):\n raise ValueError('p must be on the domain [0, 1]')\n\n # Convert the intensities to protein number.\n n_1 = I_1 / alpha\n n_2 = I_2 / alpha\n n_tot = n_1 + n_2\n k = len(I_1)\n\n # Compute the various parts of the posterior.\n binom = scipy.special.gammaln(n_tot + 1).sum() - scipy.special.gammaln(\n n_1 + 1).sum() - scipy.special.gammaln(n_2 + 1).sum()\n prob = n_1.sum() * np.log(p) + n_2.sum() * np.log(1 - p)\n change_of_var = -k * np.log(alpha)\n\n # Assemble the log posterior.\n logpost = change_of_var + binom + prob\n return prefactor * (logpost)\n\n\ndef estimate_calibration_factor(I_1, I_2, p=0.5, return_eval=False):\n \"\"\"\n Estimates the fluorescence calibration factor through optimization.\n\n Parameters\n ----------\n I_1, I_2 : 1d-arrays or Pandas Series\n The intensities of two sister cells.\n p : float\n The probability of partitioning into one cell or another. Default is\n fair partitioning (0.5).\n return_eval : Bool\n If True, the evaluation statistics from the optimization will be returned.\n\n Returns\n -------\n alpha_opt, alpha_std : float\n The best-fit value for alpha and standard devation.\n \"\"\"\n\n # Perform data validation checks.\n if (I_1 < 0).any() | (I_2 < 0).any():\n raise ValueError(\n 'I_1 and I_2 may not contain negative values. 
Fix that plz.')\n if (p < 0) | (p > 1):\n raise ValueError('p must be between 0 and 1.')\n\n # Perform the optimization\n popt = scipy.optimize.minimize_scalar(\n deterministic_log_posterior, args=(I_1, I_2, p, True))\n alpha_opt = popt.x\n\n # Compute the hessian.\n hess = smnd.approx_hess([alpha_opt], deterministic_log_posterior,\n args=(I_1, I_2, p, False))\n cov = -np.linalg.inv(hess)\n alpha_std = np.sqrt(cov[0])[0]\n if return_eval is True:\n return [alpha_opt, alpha_std, popt]\n\n else:\n return [alpha_opt, alpha_std]\n\n ", "sub_path": "act/.ipynb_checkpoints/bayes-checkpoint.py", "file_name": "bayes-checkpoint.py", "file_ext": "py", "file_size_in_byte": 12904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pickle.dump", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 199, "usage_type": "call"}, {"api_name": "stats.compute_hpd", "line_number": 200, "usage_type": "call"}, {"api_name": "viz.bokeh_traceplot", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 238, "usage_type": "call"}, {"api_name": "pystan.StanModel", "line_number": 242, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 281, "usage_type": "attribute"}, {"api_name": "scipy.special.special.gammaln", "line_number": 298, "usage_type": "call"}, {"api_name": "scipy.special.special", "line_number": 298, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 298, "usage_type": "name"}, {"api_name": "scipy.special.special.gammaln", "line_number": 299, "usage_type": "call"}, {"api_name": "scipy.special.special", "line_number": 299, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 299, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 301, "usage_type": "call"}, {"api_name": "scipy.special.optimize.minimize_scalar", "line_number": 336, "usage_type": "call"}, {"api_name": "scipy.special.optimize", "line_number": 336, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 336, "usage_type": "name"}, {"api_name": "statsmodels.tools.numdiff.approx_hess", "line_number": 341, "usage_type": "call"}, {"api_name": "statsmodels.tools.numdiff", "line_number": 341, "usage_type": "name"}, {"api_name": "numpy.linalg.inv", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 343, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 344, "usage_type": "call"}]} +{"seq_id": "553843873", "text": "'''\n분류 : DFS & BFS\n문제 : 유기농 배추 (백준 1012)\n작성일자 : 2021.03.22\n'''\n\n# 목적 : 연결 요소의 개수 출력\n# 접근 : bfs와 dfs를 이용하여 연결요소를 구한다 \n# dfs로 구하면 recursive error가 뜨는데 이유를 모르겠음 \n\n\nfrom collections import deque\n\ndef dfs(i,j) : \n if shape[i][j] == 1 : \n shape[i][j] = 
2\n for k in range(4) : \n nx = i + dx[k]\n ny = j + dy[k]\n if nx<0 or ny<0 or nx>=N or ny>=M : \n continue\n if shape[nx][ny] == 0 : \n continue\n if shape[nx][ny] == 1 : \n dfs(nx,ny)\n \n return True\n else : \n return False \ndef bfs(i,j) : \n if shape[i][j] == 1 : \n shape[i][j] = 0 \n queue = deque([(i,j)])\n while queue : \n x, y = queue.popleft()\n for i in range(4) : \n nx = x + dx[i]\n ny = y + dy[i]\n if nx<0 or ny<0 or nx>=N or ny>=M : \n continue\n if shape[nx][ny] == 0 : \n continue\n if shape[nx][ny] == 1 :\n queue.append((nx,ny)) \n shape[nx][ny] = 0\n return True \n else : False \n\nT = int(input())\nfor _ in range(T) : \n M, N, K = map(int ,input().split())\n shape = [[0 for _ in range(M)] for _ in range(N)]\n\n for i in range(K) : \n x, y = map(int, input().split())\n shape[y][x] = 1\n\n\n count = 0\n dx = [-1,1,0,0]\n dy = [0,0,-1,1]\n for i in range(N) : \n for j in range(M) : \n if bfs(i,j) == True : \n count += 1\n print(\"\\n\")\n for s in shape : \n print(*s)\n print(count)", "sub_path": "graph_search/search_09_1012.py", "file_name": "search_09_1012.py", "file_ext": "py", "file_size_in_byte": 1753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.deque", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "654189403", "text": "from flask import Flask, render_template, request, redirect, url_for, session\nimport requests\nfrom bokeh.plotting import figure\nfrom bokeh.embed import components \nimport numpy as np\nimport pandas as pd\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n return redirect('/index')\n\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')\n\n@app.route('/error-quandl')\ndef error_quandl():\n error_msg = 'Most likely, the ticker you entered was not found in the dataset.'\n return render_template('quandl_error.html', error=error_msg)\n\n@app.route('/graph', methods=['POST'])\ndef graph(): \n ticker = request.form['ticker']\n #price_type = request.form['price_type']\n price_types = request.form.getlist('price_type')\n\n api_url = 'https://www.quandl.com/api/v1/datasets/WIKI/%s.json' % ticker\n session = requests.Session()\n session.mount('http://', requests.adapters.HTTPAdapter(max_retries=3))\n\n try:\n raw_data = session.get(api_url)\n ticker = raw_data.json()['code']\n columns = raw_data.json()['column_names'] \n data = raw_data.json()['data']\n df = pd.DataFrame(data, columns=columns)\n except:\n return redirect('/error-quandl')\n \n df['Date'] = pd.to_datetime(df['Date'])\n #df = df.set_index(['Date'])\n\n TOOLS=\"pan,wheel_zoom,box_zoom,reset,save\"\n plot = figure(tools=TOOLS,\n\t\ttitle='Data from Quandle WIKI set',\n\t\tx_axis_label='date',\n\t\tx_axis_type='datetime')\n\n if 'Closing price' in price_types:\n stock_price = df['Close']\n price_type_label = ticker + ': Close'\n plot.line(df['Date'], stock_price, line_width=2, legend=price_type_label, line_color='red')\n if 'Adjusted closing price' in price_types:\n stock_price = df['Adj. Close']\n price_type_label = ticker + ': Adj. Close' \n plot.line(df['Date'], stock_price, line_width=2, legend=price_type_label, line_color='green')\n if 'Opening price' in price_types:\n stock_price = df['Open']\n price_type_label = ticker + ': Open'\n plot.line(df['Date'], stock_price, line_width=2, legend=price_type_label, line_color='blue')\n if 'Adjusted opening price' in price_types:\n stock_price = df['Adj. Open']\n price_type_label = ticker + ': Adj. 
Open'\n plot.line(df['Date'], stock_price, line_width=2, legend=price_type_label, line_color='magenta')\n\n script, div = components(plot)\n return render_template('graph.html', script=script, div=div, ticker=ticker)\n\nif __name__ == '__main__':\n #app.run(debug = True, port=33507)\n app.run(host='0.0.0.0')\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 30, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.session.mount", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 31, "usage_type": "name"}, {"api_name": "requests.adapters.HTTPAdapter", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.adapters", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.session.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "bokeh.embed.components", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "516915227", "text": "import sys\nfrom collections import deque\n\n# N = node의 개수, M = 간선의 개수, V = start node\nN, M, V = map(int, sys.stdin.readline().split())\nmatrix = [[0] * (N + 1) for _ in range(N+1)]\n\nfor _ in range(M):\n x, y = map(int, sys.stdin.readline().split())\n matrix[x][y] = 1\n matrix[y][x] = 1\n\ndef dfs(current_node, row, visited):\n visited.append(current_node)\n for search_node in range(len(row[current_node])):\n if row[current_node][search_node] and not search_node in visited:\n visited = dfs(search_node, row, visited)\n return visited\n\ndef bfs(start):\n queue = deque([start])\n visited = [start]\n while queue:\n current_node = queue.popleft()\n for search_node in range(len(matrix[current_node])):\n if matrix[current_node][search_node] and not search_node in visited:\n queue.append(search_node)\n visited.append(search_node)\n return visited\n\nprint(*dfs(V, matrix, []))\nprint(*bfs(V))", "sub_path": "week_4/신윤재/1260.py", "file_name": "1260.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.stdin.readline", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.stdin", 
"line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "282385262", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 2 22:30:16 2017\n\n@author: Albert-Desktop\n\"\"\"\n\nimport cv2\nimport math\nimport numpy as np\nfrom keras.models import load_model\n\n# load trained CNN model (created from cnn.py)\nmodel = load_model('my_model.h5')\n\n# read in a picture of a sudoku puzzle\nsudokuPuzzle = cv2.imread(\"sudoku.png\")\nh, w, c = sudokuPuzzle.shape\n\nsudoku = []\n\nfor row in range(9):\n sudokuRow = []\n for col in range(9):\n x = int(w/9) * col\n y = int(h/9) * row\n crop_img = sudokuPuzzle[y:y+int(h/9), x:x+int(w/9)] # Crop from x, y, w, h -> 100, 200, 300, 400\n # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]\n gray_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(gray_img, (28,28))\n img = 255-img\n img = img.reshape(1, 28, 28, 1)\n sudokuRow.append(np.argmax(model.predict(img)))\n\n sudoku.append(sudokuRow)\n \n\n\n\n# Algorithm to solve the Sodoku Puzzle\nsudoku = np.array(sudoku)\npossibleValues = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n\n\ndef getNext(row, col):\n if (col == 8):\n return [row+1, 0]\n else:\n return [row, col+1]\n\ndef solve(row, col, puzzle):\n puzzleCopy = np.copy(puzzle)\n nextitem = getNext(row, col)\n # 0 mean empty\n # try to find value for the cell\n if (puzzle[row, col] == 0):\n sectionRow = math.floor(row/3)*3\n sectionCol = math.floor(col/3)*3\n usedValues = set([*list(puzzle[:, col]), *list(puzzle[row,:]), *list(puzzle[sectionRow: sectionRow+3, sectionCol: sectionCol+3].flatten())])\n availableValues = list(usedValues^set(possibleValues))\n if (row == 8 and col == 8):\n puzzleCopy[row, col] = availableValues[0]\n return puzzleCopy\n else:\n for availableValue in availableValues:\n puzzleCopy[row, col] = availableValue\n found = solve(nextitem[0], nextitem[1], puzzleCopy)\n if (found is not None):\n return found\n return None;\n else:\n if (row == 8 and col == 8):\n return puzzleCopy\n found = solve(nextitem[0], nextitem[1], puzzleCopy)\n if (found is not None):\n return found\n return None\n \nsolvePuzzle = solve(0, 0, sudoku)\n\nprint(solvePuzzle) ", "sub_path": "sudokuSolver.py", "file_name": "sudokuSolver.py", "file_ext": "py", "file_size_in_byte": 2325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "keras.models.load_model", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 53, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 58, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "519739457", "text": "from conans import ConanFile, CMake, tools\nimport os\n\n\nclass GoblinEngineerConan(ConanFile):\n name = \"goblin-engineer\"\n description = \"Keep it short\"\n topics = (\"conan\", 
\"libname\", \"logging\")\n url = \"https://github.com/cyberduckninja/goblin-engineer\"\n homepage = \"https://github.com/cyberduckninja/goblin-engineer\"\n author = \"kotbegemot \"\n license = \"MIT\"\n exports = [\"LICENSE.md\"]\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n _cmake = None\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"boost_no_deprecated\": [True, False],\n \"http_component\": [True, False],\n \"cxx_standard\": [14, 17]\n }\n\n default_options = {\n \"boost_no_deprecated\": False,\n \"http_component\": False,\n \"shared\": False,\n \"fPIC\": False,\n \"cxx_standard\": 14\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n requires = (\n \"boost/1.75.0\",\n \"actor-zeta/1.0.0a5@cyberduckninja/stable\"\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n if self.options.boost_no_deprecated:\n self.options[\"boost\"].error_code_header_only = True\n self.options[\"boost\"].system_no_deprecated = True\n self.options[\"boost\"].asio_no_deprecated = True\n self.options[\"boost\"].filesystem_no_deprecated = True\n\n self.options[\"actor-zeta\"].exceptions_disable = False\n self.options[\"actor-zeta\"].rtti_disable = False\n\n if self.options.cxx_standard == 17:\n self.options[\"actor-zeta\"].cxx_standard = self.options.cxx_standard\n\n # if self.options.shared:\n # self.options[\"actor-zeta\"].SHARED = True\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self)\n self._cmake.definitions[\"CMAKE_CXX_STANDARD\"] = self.options.cxx_standard\n self._cmake.configure(source_dir=self._source_subfolder)\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.md\", dst=\"licenses\", src=self._source_subfolder)\n include_folder = os.path.join(self._source_subfolder, \"header\")\n self.copy(pattern=\"*\", dst=\"include\", src=include_folder)\n self.copy(pattern=\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(pattern=\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.a\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.so*\", dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dylib\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n", "sub_path": "recipes/goblin-engineer/all/conanfile.py", "file_name": "conanfile.py", "file_ext": "py", "file_size_in_byte": 3080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "conans.ConanFile", "line_number": 5, "usage_type": "name"}, {"api_name": "conans.tools.get", "line_number": 64, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 64, "usage_type": "name"}, {"api_name": "os.rename", "line_number": 66, "usage_type": "call"}, {"api_name": "conans.CMake", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "conans.tools.collect_libs", "line_number": 90, "usage_type": "call"}, {"api_name": "conans.tools", "line_number": 90, "usage_type": "name"}]} 
+{"seq_id": "364602620", "text": "# coding:UTF-8\n# functions: 自作関数(functions.pyからインポート)\n# Json: Jsonデータを扱うためのモジュール\n# colorama: 出力文字色を変更するためのモジュール\nimport functions, json\nimport colorama\nfrom colorama import Fore, Back, Style\n\n# day, month, year に、当日の日付を代入\nday, month = functions.get_date()\nyear = functions.get_year()\nprint(\"今日は\" + str(month) + \"月\" + str(day) + \"日です。\")\n\n# 既存のdata.jsonファイルからデータを読み込む\n# try-exceptで例外処理\n# data.jsonが空の場合(初回使用向け),エラーを防ぐために、ダミーデータを書き込む\ntry:\n li = list()\n jsonData = functions.outputJson()\n for data in jsonData:\n # dataからそれぞれの締切日を計算する\n data_year = int(data['kadai']['deadline'] / 10000)\n data_month = int(data['kadai']['deadline'] % 10000 / 100)\n data_day = int(data['kadai']['deadline'] % 10000 % 100)\n # ダミーデータを追加する\n if data['kadai']['subject'] == \"ダミー\":\n li.append(data)\n continue\n # data_year, data_month, data_day から締切日を過ぎているか判断\n if data_year > year:\n li.append(data)\n continue\n if data_year == year and data_month >= month:\n li.append(data)\n continue\n if data_year == year and data_month == month and data_day >= day:\n li.append(data)\n continue\n with open('data.json', 'w') as target:\n json.dump(li, target, indent=4)\n jsonData = functions.outputJson()\nexcept Exception:\n # ダミーデータを作成\n print(\"ダミーデータ作成\")\n dict = {\n 'kadai': {\n 'subject':\"ダミー\",\n 'assignment':\"00000\",\n 'deadline':00000,\n 'detail':\"00000000\",\n 'dueTime':2460\n }\n }\n li = list()\n with open('data.json', 'r') as target:\n li.append(dict)\n with open('data.json', 'w') as target:\n json.dump(li, target, indent=4)\n jsonData = functions.outputJson()\n pass\n# 締め切り日当日のものだけ出力する\nprint(\"\\n締め切り日当日です!!\")\nfor data in jsonData:\n if data['kadai']['deadline'] == (year * 10000 + 100*month + day):\n print(Fore.GREEN + \"教科名 : \" + data['kadai']['subject'])\n print(\"課題名 : \" + data['kadai']['assignment'])\n functions.printout_deadline(data['kadai']['deadline'], data['kadai']['dueTime'])\n print(\"[詳細]\\n\" + data['kadai']['detail'] + \"\\n\" + Fore.WHITE)\n# メイン処理\nwhile 1:\n command = int(input(\"コマンドを入力してください\\n0: 終了\\n1: 書き込み\\n2: 教科名検索\\n3: 締め切り順ソート\\n\") )\n jsonData = functions.outputJson()\n # プログラムを終了する場合\n if command == 0:\n print(\"終了します\")\n break\n # データベースに書き込む場合\n elif command == 1:\n print(Fore.CYAN + \"書き込み\")\n subject = str(input(\"教科名を入力してください\"))\n assignment = str(input(\"課題を入力してください\"))\n deadline = int(input(\"締め切りを入力してください(yyyymmdd)\"))\n # 入力されたdeadlineが正しい形式か確かめる\n if functions.check_date(deadline) is False:\n continue\n dueTime = int(input(\"締め切り時間を入力して下さい(hhmm)\"))\n detail = str(input(\"詳細を入力してください\" + Fore.WHITE))\n functions.inputJson(subject, assignment, deadline, detail, dueTime)\n # 教科名検索を行う場合\n elif command == 2:\n print(\"教科名検索を行います\")\n subject = str(input(\"教科名を入力してください\"))\n for data in jsonData:\n if data['kadai']['subject'] == subject:\n print(Fore.GREEN + \"\\n課題名 : \" + str(data['kadai']['assignment']))\n functions.printout_deadline(data['kadai']['deadline'], data['kadai']['dueTime'])\n print(\"[詳細]\\n\" + str(data['kadai']['detail']) + Fore.WHITE)\n # 締め切り順にソートする場合\n elif command == 3:\n print(\"締め切り順ソート\")\n jsonData = functions.outputJson()\n all_data = sorted(jsonData, key=lambda x:x['kadai']['deadline'])\n for data in all_data:\n if data['kadai']['subject'] == 'ダミー':\n continue\n print(Fore.MAGENTA + \"\\n教科名 : \" + data['kadai']['subject'])\n print(\"課題名 : \" + data['kadai']['assignment'])\n functions.printout_deadline(data['kadai']['deadline'], data['kadai']['dueTime'])\n print(\"[詳細]\\n\" + 
data['kadai']['detail'] + Fore.WHITE)\n # 不正なコマンドが入力された場合\n else:\n print(Fore.RED + \"正しいコマンドを入力してください\" + Fore.WHITE)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4986, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "functions.get_date", "line_number": 10, "usage_type": "call"}, {"api_name": "functions.get_year", "line_number": 11, "usage_type": "call"}, {"api_name": "functions.outputJson", "line_number": 19, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 40, "usage_type": "call"}, {"api_name": "functions.outputJson", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 58, "usage_type": "call"}, {"api_name": "functions.outputJson", "line_number": 59, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 65, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 65, "usage_type": "name"}, {"api_name": "functions.printout_deadline", "line_number": 67, "usage_type": "call"}, {"api_name": "colorama.Fore.WHITE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 68, "usage_type": "name"}, {"api_name": "functions.outputJson", "line_number": 72, "usage_type": "call"}, {"api_name": "colorama.Fore.CYAN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 79, "usage_type": "name"}, {"api_name": "functions.check_date", "line_number": 84, "usage_type": "call"}, {"api_name": "colorama.Fore.WHITE", "line_number": 87, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 87, "usage_type": "name"}, {"api_name": "functions.inputJson", "line_number": 88, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 95, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 95, "usage_type": "name"}, {"api_name": "functions.printout_deadline", "line_number": 96, "usage_type": "call"}, {"api_name": "colorama.Fore.WHITE", "line_number": 97, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 97, "usage_type": "name"}, {"api_name": "functions.outputJson", "line_number": 101, "usage_type": "call"}, {"api_name": "colorama.Fore.MAGENTA", "line_number": 106, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 106, "usage_type": "name"}, {"api_name": "functions.printout_deadline", "line_number": 108, "usage_type": "call"}, {"api_name": "colorama.Fore.WHITE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 109, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 112, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 112, "usage_type": "name"}, {"api_name": "colorama.Fore.WHITE", "line_number": 112, "usage_type": "attribute"}]} +{"seq_id": "268812937", "text": "from django.test import TestCase\nfrom quiz.models import Question, Answer\nfrom django.utils import timezone\n\nclass HomePageTest(TestCase):\n\n def test_uses_home_templates(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'home.html')\n\n def test_can_display_question_model(self):\n Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n Question.objects.create(text='1 - 1 = 2!', pub_date=timezone.now())\n response = self.client.get('/')\n self.assertContains(response, '1 + 1 = 2!')\n self.assertContains(response, '1 - 1 = 2!')\n\nclass 
DetailPageTest(TestCase):\n\n def test_uses_detail_templates(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n response = self.client.get('/{}/'.format(question.id))\n self.assertTemplateUsed(response, 'detail.html')\n\n def test_can_display_question_and_answer(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n answerTrue = Answer.objects.create(question=question, text='True', vote=0)\n answerFalse = Answer.objects.create(question=question, text='False', vote=0)\n response = self.client.get('/{}/'.format(question.id))\n self.assertContains(response, question.text)\n self.assertContains(response, answerTrue.text)\n self.assertContains(response, answerFalse.text)\n\nclass ResultsPageTest(TestCase):\n\n def test_uses_results_templates(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n response = self.client.get('/{}/results/'.format(question.id))\n self.assertTemplateUsed(response, 'results.html')\n\n def test_can_display_question_and_answer(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n answerTrue = Answer.objects.create(question=question, text='True', vote=0)\n answerFalse = Answer.objects.create(question=question, text='False', vote=0)\n response = self.client.get('/{}/'.format(question.id))\n self.assertContains(response, question.text)\n self.assertContains(response, answerTrue.text)\n self.assertContains(response, str(answerTrue.vote))\n self.assertContains(response, answerFalse.text)\n self.assertContains(response, str(answerFalse.vote))\n\nclass VotePageTest(TestCase):\n\n def test_can_save_a_POST_request(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n answerTrue = Answer.objects.create(question=question, text='True', vote=0)\n answerFalse = Answer.objects.create(question=question, text='False', vote=0)\n response = self.client.post('/{}/vote/'.format(question.id), data={'answer': 1})\n answerTrue = Answer.objects.all()[0]\n self.assertEqual(answerTrue.vote, 1)\n\n def test_redirects_after_POST(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n answerTrue = Answer.objects.create(question=question, text='True', vote=0)\n answerFalse = Answer.objects.create(question=question, text='False', vote=0)\n response = self.client.post('/{}/vote/'.format(question.id), data={'answer': 1})\n self.assertRedirects(response, '/{}/results/'.format(question.id))\n\nclass AllresultsPageTest(TestCase):\n\n def test_uses_allresults_templates(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n response = self.client.get('/allresults/')\n self.assertTemplateUsed(response, 'allresults.html')\n\n def test_can_display_question_and_answer(self):\n question = Question.objects.create(text='1 + 1 = 2!', pub_date=timezone.now())\n answerTrue = Answer.objects.create(question=question, text='True', vote=0)\n answerFalse = Answer.objects.create(question=question, text='False', vote=0)\n response = self.client.get('/allresults/')\n self.assertContains(response, question.text)\n self.assertContains(response, answerTrue.text)\n self.assertContains(response, str(answerTrue.vote))\n self.assertContains(response, answerFalse.text)\n self.assertContains(response, str(answerFalse.vote))\n\nclass AddquestionPageTest(TestCase):\n\n def test_can_save_a_POST_request(self):\n response = self.client.post('/addquestion/', data={'addquestion': '2 - 1 = 3', 
'confirmcode': 'pun48'})\n self.assertEqual(Question.objects.count(), 1)\n new_question = Question.objects.first()\n self.assertEqual(new_question.text, '2 - 1 = 3!')\n self.assertEqual(Answer.objects.count(), 2)\n answerTrue = Answer.objects.all()[0]\n answerFalse = Answer.objects.all()[1]\n self.assertEqual(answerTrue.question, new_question)\n self.assertEqual(answerTrue.text, 'True')\n self.assertEqual(answerFalse.question, new_question)\n self.assertEqual(answerFalse.text, 'False')\n\n def test_redirects_after_POST(self):\n response = self.client.post('/addquestion/', data={'addquestion': '2 - 1 = 3', 'confirmcode': 'pun48'})\n self.assertRedirects(response, '/')\n\nclass QuestionAndAnswerModelsTest(TestCase):\n\n def test_saving_and_retrieving_questions_and_answers(self):\n question = Question()\n question.text = '1 + 1 = 2!'\n question.pub_date = timezone.now()\n question.save()\n\n saved_questions = Question.objects.all()\n self.assertEqual(saved_questions.count(), 1)\n saved_question = saved_questions[0]\n self.assertEqual(saved_question.text, question.text)\n self.assertEqual(saved_question.pub_date, question.pub_date)\n\n answer = Answer()\n answer.question = question\n answer.text = 'True'\n answer.vote = 1\n answer.save()\n\n saved_answers = Answer.objects.all()\n self.assertEqual(saved_answers.count(), 1)\n saved_answer = saved_answers[0]\n self.assertEqual(saved_answer.question, answer.question)\n self.assertEqual(saved_answer.text, answer.text)\n self.assertEqual(saved_answer.vote, answer.vote)\n", "sub_path": "quiz/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 6179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.test.TestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 12, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 12, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 13, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 18, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 21, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 26, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 26, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 26, "usage_type": "call"}, {"api_name": 
"django.utils.timezone", "line_number": 26, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 27, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 28, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 28, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 34, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 37, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 37, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 37, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 37, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 42, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 42, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 42, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 42, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 43, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 43, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 44, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 44, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 52, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 55, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 55, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 55, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 56, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 56, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 57, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 57, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.all", "line_number": 59, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 59, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 63, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", 
"line_number": 63, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 63, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 63, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 64, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 64, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 65, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 65, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 69, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 72, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 72, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 72, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 72, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.create", "line_number": 77, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 77, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 77, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 77, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 78, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 78, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.create", "line_number": 79, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 79, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 87, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.count", "line_number": 91, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 91, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.first", "line_number": 92, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 92, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.count", "line_number": 94, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 94, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 95, "usage_type": "name"}, {"api_name": "quiz.models.Answer.objects.all", "line_number": 96, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 96, "usage_type": 
"name"}, {"api_name": "django.test.TestCase", "line_number": 106, "usage_type": "name"}, {"api_name": "quiz.models.Question", "line_number": 109, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 111, "usage_type": "name"}, {"api_name": "quiz.models.Question.objects.all", "line_number": 114, "usage_type": "call"}, {"api_name": "quiz.models.Question.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "quiz.models.Question", "line_number": 114, "usage_type": "name"}, {"api_name": "quiz.models.Answer", "line_number": 120, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects.all", "line_number": 126, "usage_type": "call"}, {"api_name": "quiz.models.Answer.objects", "line_number": 126, "usage_type": "attribute"}, {"api_name": "quiz.models.Answer", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "77144471", "text": "#!/usr/bin/env python3\n###############################################################################\n# Copyright (c) Intel Corporation - All rights reserved. #\n# This file is part of the LIBXSMM library. #\n# #\n# For information on the license, see the LICENSE file. #\n# Further information: https://github.com/hfp/libxsmm/ #\n# SPDX-License-Identifier: BSD-3-Clause #\n###############################################################################\n# Anand Venkat (Intel Corp.)\n###############################################################################\n\nimport logging\nimport sys\nimport numpy as np\nimport tvm\nimport topi\nimport time\nfrom topi.util import get_const_tuple\nimport math\nimport topi.testing\nimport xlwt\nimport argparse\n\nimport os\nimport ctypes\nfrom tvm import autotvm\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", nargs=1, type=str, default=[\"resnet3\"])\nargs = parser.parse_args()\nlayer = args.d[0]\n\n#Resnet-50 layers (excluding first layer)\n_resnet_layers ={\n 'resnet2':[1,256,64,56,56,1,1,0],\n 'resnet3':[1,64,64,56,56,1,1,0],\n 'resnet4':[1,64,64,56,56,3,1,1],\n 'resnet5':[1,64,256,56,56,1,1,0],\n 'resnet6':[1,512,256,56,56,1,2,0],\n 'resnet7':[1,128,256,56,56,1,2,0],\n 'resnet8':[1,128,128,28,28,3,1,1],\n 'resnet9':[1,512,128,28,28,1,1,0],\n 'resnet10':[1,128,512,28,28,1,1,0],\n 'resnet11':[1,1024,512,28,28,1,2,0],\n 'resnet12':[1,256,512,28,28,1,2,0],\n 'resnet13':[1,256,256,14,14,3,1,1],\n 'resnet14':[1,1024,256,14,14,1,1,0],\n 'resnet15':[1,256,1024,14,14,1,1,0],\n 'resnet16':[1,2048,1024,14,14,1,2,0],\n 'resnet17':[1,512,1024,14,14,1,2,0],\n 'resnet18':[1,512,512,7,7,3,1,1],\n 'resnet19':[1,2048,512,7,7,1,1,0],\n 'resnet20':[1,512,2048,7,7,1,1,0]\n}\n\n'''\nConvert input from NCHW format to NCHW16C format where the innermost data dimension is vectorized for AVX-512\n'''\ndef convert_input(a_np, batch, in_channel,input_height,input_width,pad_height,pad_width,vlen,A):\n to_return = np.zeros((batch, math.ceil(in_channel/vlen),input_height + 2*pad_height, input_width+ 2*pad_width,vlen),dtype = A.dtype)\n\n for i in range(batch):\n for j in range(math.ceil(in_channel/vlen)):\n for k in range(input_height + 2*pad_height):\n for l in range(input_width + 2*pad_width):\n for m in range(vlen):\n if k < pad_height or k >= input_height + pad_height or l < pad_width or l >= input_width+ pad_width or j*vlen + m >= in_channel:\n to_return[i,j,k,l,m] = float(0)\n else:\n to_return[i,j,k,l,m] = a_np[i,j*vlen + 
m,k-pad_height,l-pad_width]\n\n return to_return\n'''\nConvert output from NCHW format to NCHW16C format where the innermost data dimension is vectorized for AVX-512\n'''\n\ndef convert_output(a_np, batch, out_channel,output_height,output_width,vlen):\n to_return = np.zeros((batch, out_channel,output_height, output_width), dtype = float)\n for i in range(batch):\n for j in range(math.ceil(out_channel/vlen)):\n for k in range(output_height):\n for l in range(output_width):\n for m in range(vlen):\n to_return[i,j*vlen + m,k,l] = a_np[i,j,k,l,m]\n\n\n\n return to_return\n\n'''\nConvert weights from KCRS format to KCRS16C16K format where the innermost data dimension is vectorized for AVX-512\n'''\n\ndef convert_weight(w_np, in_channel, out_channel, kernel_height, kernel_width, vlen,W):\n to_return = np.zeros((math.ceil(out_channel/vlen), math.ceil(in_channel/vlen),kernel_height, kernel_width,vlen,vlen), dtype = W.dtype)\n\n for i in range(math.ceil(out_channel/vlen)):\n for j in range(math.ceil(in_channel/vlen)):\n for k in range(kernel_height):\n for l in range(kernel_width):\n for m in range(vlen):\n for n in range(vlen):\n if i*vlen + n >= out_channel or j*vlen + m >= in_channel:\n to_return[i,j,k,l,m,n] =float(0)\n else:\n to_return[i,j,k,l,m,n] = w_np[i*vlen + n,j*vlen+ m,k,l]\n\n\n\n return to_return\n\n\n# Get the reference output tensor for correctness check\ndef get_ref_data(batch,out_channel,in_channel,input_height,input_width,kernel_height,kernel_width,stride_height,padding):\n a_np = np.random.uniform(size=(batch,in_channel,input_height,input_width)).astype(float)\n w_np = np.random.uniform(size=(out_channel,in_channel,kernel_height,kernel_width)).astype(float)\n if batch == 1:\n b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride_height, padding)\n #b_np = topi.nn.conv2d_NCHWc(a_np, w_np,out_channel,kernel_height,stride_height,\n # padding, layout=\"NCHWc\", out_layout=\"NCHWc\", out_dtype='float32')\n\n if batch == 1:\n return a_np, w_np, b_np\n else:\n return a_np, w_np\n\n\n#special case for small height and width (e.g.. 
h = w = 7), where (h*w) becomes dimension of the brgemm (M)\ndef intrin_libxsmm_hxw(ofmblock,ofw,ifmblock, stride_width,ifw,rco, ifh,r,s, ifh_stride, ifw_stride,\\\n ofh, stride_height, out_channel,output_height, output_width, in_channel):\n\n last_input_width_index = (ofw-1)*stride_width + s-1\n\n last_input_height_index = (ofh-1)*stride_height + r-1\n ry = tvm.reduce_axis((0, r), name='ry')\n rx = tvm.reduce_axis((0, s), name='rx')\n\n\n A = tvm.placeholder((rco,r,s,ifmblock, ofmblock), name='w')\n B = tvm.placeholder((rco,last_input_height_index + 1,last_input_width_index + 1,ifmblock), name='b')\n k = tvm.reduce_axis((0, ifmblock), name='k')\n k_outer = tvm.reduce_axis((0, rco), name='k_outer')\n C = tvm.compute(\n (ofh,ofw,ofmblock),\n lambda z,m,n: tvm.sum(A[k_outer,ry,rx,k,n] * B[k_outer,ry + z*stride_height,rx + m*stride_width,k], axis=[k_outer,ry,rx,k]),\n name='out')\n\n s1 = tvm.create_schedule(C.op)\n\n ifw1,ofw1,ofmblock1 = s1[C].op.axis\n\n rco_outer,ry,rx,rci = s1[C].op.reduce_axis\n s1[C].reorder(ifw1,rco_outer,ry,rx,ofw1,ofmblock1,rci)\n\n xx_ptr = tvm.decl_buffer(A.shape, A.dtype,\n name=\"W\",offset_factor = 1,\n data_alignment=64)\n\n\n yy_ptr = tvm.decl_buffer(B.shape, B.dtype,\n name=\"X\",offset_factor=1,\\\n strides=[tvm.var(\"s3\"),tvm.var(\"s2\"), ifmblock, 1],#offset_factor=16\n data_alignment=64)\n\n zz_ptr = tvm.decl_buffer(C.shape, C.dtype,\n name=\"OUT\",offset_factor=1,#offset_factor=1,\n strides=[output_width*ofmblock, ofmblock, 1],\n data_alignment=64)\n\n def intrin_func(ins, outs):\n # tvm call extern is used to interface to libxsmm bacth reduce kernel gemm implementation\n # rco*r*s is the number of batches\n init_and_compute = tvm.call_extern (\"int32\",\"batch_reduce_kernel_init_update\", ins[0].access_ptr(\"r\"),ins[1].access_ptr(\"r\"),outs[0].access_ptr(\"w\"),\\\n rco*r*s,ofmblock,ifmblock,r,s,ifh_stride,ifw_stride, ofw*ofh, stride_width)\n reset = tvm.call_extern (\"int32\",\"batch_reduce_kernel_init\", outs[0].access_ptr(\"w\"),ofmblock, ofw*ofh)\n body = tvm.call_extern (\"int32\",\"batch_reduce_kernel_update\", ins[0].access_ptr(\"r\"),ins[1].access_ptr(\"r\"),outs[0].access_ptr(\"w\"), rco*r*s,ofmblock,\\\n ifmblock,ofw*ofh, stride_width,r,s, ifh_stride,ifw_stride)\n if math.ceil(in_channel/ifmblock) == rco:\n return init_and_compute, None, init_and_compute\n else:\n return init_and_compute,reset,body\n\n with tvm.build_config(data_alignment=64):\n return tvm.decl_tensor_intrin(C.op, intrin_func, name=\"GEMM\",\n binds= {A: xx_ptr,\n B: yy_ptr,\n C: zz_ptr})\n\n# regular case of batch reduce gemm with ofw corresponding to batch reduce brgemm dimension(M)\ndef intrin_libxsmm_tuned(ofmblock,ofw,ifmblock, stride_width,ifw,rco, ifh,r,s, ifh_stride, ifw_stride, in_channel):\n last_input_width_index = (ofw-1)*stride_width + s-1\n A = tvm.placeholder((rco,r,s,ifmblock, ofmblock), name='w')\n B = tvm.placeholder((rco,r,last_input_width_index + 1,ifmblock), name='b')\n k = tvm.reduce_axis((0, ifmblock), name='k')\n k_outer = tvm.reduce_axis((0, rco), name='k_outer')\n ry = tvm.reduce_axis((0, r), name='ry')\n rx = tvm.reduce_axis((0, s), name='rx')\n C = tvm.compute(\n (ofw,ofmblock),\n lambda m,n: tvm.sum(A[k_outer,ry,rx,k,n] * B[k_outer,ry, rx + m*stride_width,k], axis=[k_outer,ry,rx,k]),\n name='out')\n s1 = tvm.create_schedule(C.op)\n w,ofm = s1[C].op.axis\n kco,ky,kx,kci = s1[C].op.reduce_axis\n s1[C].reorder(kco,ky,kx,w,ofm,kci)\n xx_ptr = tvm.decl_buffer(A.shape, A.dtype,\n name=\"W\",offset_factor=1,\n data_alignment=64)\n\n yy_ptr = 
tvm.decl_buffer(B.shape, B.dtype,\n name=\"some\", offset_factor=1,strides=[tvm.var(\"s3\"), tvm.var(\"s2\"), ifmblock, 1],\n data_alignment=64)\n\n zz_ptr = tvm.decl_buffer(C.shape, C.dtype,\n name=\"OUT\",offset_factor=1,\n data_alignment=64)\n\n def intrin_func(ins, outs):\n # tvm call extern is used to interface to libxsmm batch reduce kernel gemm implementation\n # rco*r*s is the number of batches\n init_and_compute = tvm.call_extern (\"int32\",\"batch_reduce_kernel_init_update\", ins[0].access_ptr(\"r\"),ins[1].access_ptr(\"r\"),outs[0].access_ptr(\"w\"),\\\n rco*r*s,ofmblock,ifmblock,r,s,ifh_stride,ifw_stride, ofw, stride_width)\n reset = tvm.call_extern (\"int32\",\"batch_reduce_kernel_init\", outs[0].access_ptr(\"w\"),ofmblock, ofw)\n body = tvm.call_extern (\"int32\",\"batch_reduce_kernel_update\", ins[0].access_ptr(\"r\"),ins[1].access_ptr(\"r\"),outs[0].access_ptr(\"w\"), rco*r*s,ofmblock,\\\n ifmblock,ofw, stride_width,r,s, ifh_stride,ifw_stride)\n if math.ceil(in_channel/ifmblock) == rco:\n return init_and_compute, None, init_and_compute\n else:\n return init_and_compute,reset,body\n\n with tvm.build_config(data_alignment=64):\n return tvm.decl_tensor_intrin(C.op, intrin_func, name=\"GEMM\",\n binds={A: xx_ptr,\n B: yy_ptr,\n C: zz_ptr})\n\n#AutoTVM template for libxmm brgemm based tensorize implementation\n@autotvm.template\ndef conv_auto_tuned(ofmblock,ofw, ifmblock, stride_width,input_width,\\\n in_channel,input_height, filter_height, filter_width,ofh, stride_height, batch, out_channel):\n\n A1 = tvm.placeholder((batch,math.ceil(in_channel/ifmblock),input_height, input_width, ifmblock), name='input')\n W1 = tvm.placeholder((math.ceil(out_channel/ofmblock), math.ceil(in_channel/ifmblock), filter_height, filter_width, ifmblock,ofmblock), name='weight')\n\n rco1 = tvm.reduce_axis((0, math.ceil(in_channel/ifmblock)), name='rco1')\n ry1 = tvm.reduce_axis((0, filter_height), name='ry1')\n rx1 = tvm.reduce_axis((0, filter_width), name='rx1')\n rci1 = tvm.reduce_axis((0, ifmblock), name='rci1')\n cfg = autotvm.get_config()\n\n cfg.define_knob(\"pack\", [0,1])\n pack = False\n w_tile = []\n\n factor_found = False\n\n\n for i in range(6, min(ofw+1,29)):\n if ofw % i == 0:\n w_tile.append((i, ofw//i) )\n factor_found = True\n\n if factor_found == False:\n w_tile.append((ofw,1))\n\n #tile factors for output width\n cfg.define_knob(\"tile_w\", w_tile)\n\n # pack data when stride > 1 and pack flag set so that data for brgemm is continuous\n if filter_height == 1 and filter_width == 1 and stride_width > 1 and stride_height > 1 and cfg['pack'].val == 1 :\n A2 = tvm.compute((batch, math.ceil(in_channel/ifmblock),ofh,ofw,ifmblock),\n lambda n,c,h,w,vlen1: A1[n, c,h*stride_height,w*stride_width,vlen1])\n B1 = tvm.compute((batch, math.ceil(out_channel/ofmblock),ofh, ofw,ofmblock),\n lambda nn,ff,yy, xx, vlen1: tvm.sum(\n W1[ff,rco1,ry1,rx1,rci1,vlen1] * A2[nn, rco1, ry1 + yy, rx1 + xx,rci1],\n axis=[rco1,ry1, rx1, rci1]),name='output')\n pack = True\n else:\n # Compute the convolution\n B1 = tvm.compute((batch, math.ceil(out_channel/ofmblock),ofh, ofw,ofmblock),\n lambda nn,ff,yy, xx, vlen1: tvm.sum(\n W1[ff,rco1,ry1,rx1,rci1,vlen1] * A1[nn, rco1, ry1 + stride_height*yy, rx1 + stride_width*xx,rci1],\n axis=[rco1,ry1, rx1, rci1]), name='output')\n\n s = tvm.create_schedule(B1.op)\n n,ko,h,w,ki = s[B1].op.axis\n rco,ry,rx, rci = s[B1].op.reduce_axis\n cfg.define_split(\"tile_h\", h, num_outputs=3)#output height\n cfg.define_split(\"tile_c\", rco, num_outputs=2) #input channel dimension\n 
cfg.define_split(\"tile_k\",ko, num_outputs=2) #output channel dimension\n w_factor_inner, _ = cfg[\"tile_w\"].val\n wo, wi = s[B1].split(w, w_factor_inner) #tiling\n rco_o,rco_i = cfg[\"tile_c\"].apply(s, B1, rco)\n ko_o, ko_i = cfg[\"tile_k\"].apply(s, B1, ko)\n ho,hm, hi = cfg[\"tile_h\"].apply(s, B1, h)\n\n s[B1].reorder(n,ko_o,ho,ko_i,rco_o,hm,wo,hi,rco_i,ry,rx,wi,ki,rci)\n cfg.define_reorder(\"reorder_outer\", [ko_i,rco_o,hm,wo], policy=\"all\")\n cfg.add_flop(np.prod(get_const_tuple(B1.shape))*in_channel*filter_height*filter_width*2)\n cfg[\"reorder_outer\"].apply(s, B1,[ko_i,rco_o,hm,wo])\n if (filter_height == 1 and filter_width == 1 and stride_width == 1 and stride_height == 1) or pack:\n if cfg[\"tile_h\"].size[1] > 1 and w_factor_inner == ofw:#cfg[\"tile_w\"].size[2] == ofw:\n libxsmm_tensorize = intrin_libxsmm_hxw(ofmblock,w_factor_inner,ifmblock, 1, w_factor_inner,\n cfg[\"tile_c\"].size[1],cfg[\"tile_h\"].size[2],\\\n filter_height, filter_width,ofh,ofw,cfg[\"tile_h\"].size[2],1, out_channel, ofh,ofw, in_channel)\n s[B1].tensorize(hi, libxsmm_tensorize)\n else:\n libxsmm_tensorize = intrin_libxsmm_tuned(ofmblock,w_factor_inner,ifmblock, 1, w_factor_inner,\n cfg[\"tile_c\"].size[1], cfg[\"tile_h\"].size[2],\\\n filter_height, filter_width,ofh, ofw, in_channel)\n s[B1].tensorize(rco_i, libxsmm_tensorize)\n\n else:\n\n libxsmm_tensorize = intrin_libxsmm_tuned(ofmblock,w_factor_inner,ifmblock, stride_width, w_factor_inner,\\\n cfg[\"tile_c\"].size[1], cfg[\"tile_h\"].size[2],\\\n filter_height, filter_width,input_height,input_width, in_channel)\n s[B1].tensorize(rco_i, libxsmm_tensorize)\n\n par = s[B1].fuse(n,ko_o,ho)\n s[B1].parallel(par)\n if pack:\n n1,c1,h1,w1,v1 = s[A2].op.axis\n par2 = s[A2].fuse(n1,c1,h1)\n s[A2].parallel(par)\n s[A2].vectorize(v1)\n\n s = s.normalize()\n\n return s, [W1, A1, B1]\n\ndef driver():\n\n\n book = xlwt.Workbook(encoding=\"utf-8\")\n sheet1 = book.add_sheet(\"Sheet 1\")\n row1=0\n sheet1.write(0,0,\"Layer\")\n sheet1.write(0,1,\"AutoTVM_FLOPS\")\n row1 = row1 + 1\n\n\n\n batch = _resnet_layers[layer][0]\n in_channel = _resnet_layers[layer][2]\n out_channel = _resnet_layers[layer][1]\n input_height = _resnet_layers[layer][3]\n input_width = _resnet_layers[layer][4]\n kernel_height = _resnet_layers[layer][5]\n kernel_width = _resnet_layers[layer][5]\n pad_height = _resnet_layers[layer][7]\n pad_width = _resnet_layers[layer][7]\n stride_height = _resnet_layers[layer][6]\n stride_width = _resnet_layers[layer][6]\n vlen = 64\n assert(pad_height == pad_width)\n assert(stride_height == stride_width)\n assert(kernel_height == kernel_width)\n\n output_width = ((input_width + 2 * pad_width - kernel_width) // stride_width) + 1\n output_height = ((input_height + 2 * pad_height - kernel_height) // stride_height) + 1\n assert(output_height == output_width)\n assert(input_height == input_width)\n\n\n ctx = tvm.context('llvm', 0)\n sheet1.write(row1,0,layer)\n\n\n\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n\n\n task = autotvm.task.create(conv_auto_tuned, args=(vlen,output_width, vlen, stride_width,input_width + 2*pad_width, in_channel,\\\n input_height + 2*pad_height, kernel_height, kernel_width,output_height, stride_height, batch, out_channel),\\\n target='llvm -mtriple=x86_64 -mcpu=skylake-avx512 -mattr=+skx,+fma,+fma4,+avx512ifma,+avx512f,+avx512cd,+avx512bw,+avx512vl,+avx512dq')\n\n logging.getLogger('autotvm').setLevel(logging.DEBUG)\n logging.getLogger('autotvm').addHandler(logging.StreamHandler(sys.stdout))\n\n 
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner(number=1000, repeat=1,min_repeat_ms=1000))\n\n tuner = autotvm.tuner.RandomTuner(task)\n #Please limit n_trial to reduce tuning time\n n_trial= len(task.config_space)\n log_file = layer + \".log\"\n\n #comment out the following call to tuner to just run the best case from log file history\n tuner.tune(n_trial=n_trial,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(n_trial, prefix=layer),\n\n autotvm.callback.log_to_file(log_file)])\n with autotvm.apply_history_best( layer+'.log'):\n with tvm.target.create(\"llvm\"):\n\n a_np, w_np, b_np = get_ref_data(batch,out_channel,in_channel,input_height,input_width,kernel_height, kernel_width,stride_height,pad_height)\n s, arg_bufs = conv_auto_tuned(vlen,output_width, vlen, stride_width,input_width + 2*pad_width, in_channel,\\\n input_height + 2*pad_height, kernel_height, kernel_width,output_height, stride_height, batch, out_channel)\n\n a_np2 = convert_input(a_np, batch, in_channel,input_height,input_width,pad_height,pad_width,vlen, arg_bufs[1])\n w_np2 = convert_weight(w_np, in_channel, out_channel, kernel_height, kernel_width,vlen,arg_bufs[0])\n ctx = tvm.context('llvm', 0)\n b = tvm.nd.array(np.zeros((batch, math.ceil(out_channel/vlen),output_height, output_width,vlen), dtype=arg_bufs[2].dtype), ctx)\n a = tvm.nd.array(a_np2, ctx)\n w = tvm.nd.array(w_np2, ctx)\n\n func = tvm.build(s, arg_bufs,target=\\\n 'llvm -mtriple=x86_64 -mcpu=skylake-avx512 -mattr=+skx,+fma,+fma4,+avx512ifma,+avx512f,+avx512cd,+avx512bw,+avx512vl,+avx512dq', name=\"conv2d\")\n func(w,a,b)\n b_np_A = convert_output(b.asnumpy(), 1,out_channel, output_height, output_width,vlen)\n np.testing.assert_allclose(b_np_A, b_np, rtol=1e-5)\n evaluator1 = func.time_evaluator(func.entry_name, ctx, number=1000,repeat=1, min_repeat_ms=1)\n\n t1 = evaluator1(w,a, b).mean\n gflops_tvm1 = np.prod(get_const_tuple(arg_bufs[2].shape))*in_channel*kernel_height*kernel_width*2\n gflops_tvm1 = gflops_tvm1/1e9/t1\n\n print(\"Time for conv(tuned) is : {0:.6f}\".format(t1))\n print(\"GFLOPS : {0:.3f} \".format( gflops_tvm1))\n\n\n sheet1.write(row1,1,gflops_tvm1)\n\n row1 = row1 + 1\n book.save( \"AutoTVM_tensorize_resnet\" + layer +\".xls\")\n\n\nif __name__ == \"__main__\":\n driver()\n\n", "sub_path": "samples/deeplearning/tvm_cnnlayer/mb1_tuned_latest.py", "file_name": "mb1_tuned_latest.py", "file_ext": "py", "file_size_in_byte": 20227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 62, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 97, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 99, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 118, "usage_type": "call"}, 
{"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "topi.testing.conv2d_nchw_python", "line_number": 120, "usage_type": "call"}, {"api_name": "topi.testing", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tvm.reduce_axis", "line_number": 137, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 138, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 141, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 142, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 143, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 144, "usage_type": "call"}, {"api_name": "tvm.compute", "line_number": 145, "usage_type": "call"}, {"api_name": "tvm.sum", "line_number": 147, "usage_type": "call"}, {"api_name": "tvm.create_schedule", "line_number": 150, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 157, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 162, "usage_type": "call"}, {"api_name": "tvm.var", "line_number": 164, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 167, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 175, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 177, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 178, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 180, "usage_type": "call"}, {"api_name": "tvm.build_config", "line_number": 185, "usage_type": "call"}, {"api_name": "tvm.decl_tensor_intrin", "line_number": 186, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 194, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 195, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 196, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 197, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 198, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 199, "usage_type": "call"}, {"api_name": "tvm.compute", "line_number": 200, "usage_type": "call"}, {"api_name": "tvm.sum", "line_number": 202, "usage_type": "call"}, {"api_name": "tvm.create_schedule", "line_number": 204, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 208, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 212, "usage_type": "call"}, {"api_name": "tvm.var", "line_number": 213, "usage_type": "call"}, {"api_name": "tvm.decl_buffer", "line_number": 216, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 223, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 225, "usage_type": "call"}, {"api_name": "tvm.call_extern", "line_number": 226, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 228, "usage_type": "call"}, {"api_name": "tvm.build_config", "line_number": 233, "usage_type": "call"}, {"api_name": "tvm.decl_tensor_intrin", "line_number": 234, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 244, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 244, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 245, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 245, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 247, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 247, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 248, 
"usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 249, "usage_type": "call"}, {"api_name": "tvm.reduce_axis", "line_number": 250, "usage_type": "call"}, {"api_name": "tvm.autotvm.get_config", "line_number": 251, "usage_type": "call"}, {"api_name": "tvm.autotvm", "line_number": 251, "usage_type": "name"}, {"api_name": "tvm.compute", "line_number": 273, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 273, "usage_type": "call"}, {"api_name": "tvm.compute", "line_number": 275, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 275, "usage_type": "call"}, {"api_name": "tvm.sum", "line_number": 276, "usage_type": "call"}, {"api_name": "tvm.compute", "line_number": 282, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 282, "usage_type": "call"}, {"api_name": "tvm.sum", "line_number": 283, "usage_type": "call"}, {"api_name": "tvm.create_schedule", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 301, "usage_type": "call"}, {"api_name": "topi.util.get_const_tuple", "line_number": 301, "usage_type": "call"}, {"api_name": "tvm.autotvm.template", "line_number": 240, "usage_type": "attribute"}, {"api_name": "tvm.autotvm", "line_number": 240, "usage_type": "name"}, {"api_name": "xlwt.Workbook", "line_number": 337, "usage_type": "call"}, {"api_name": "tvm.context", "line_number": 368, "usage_type": "call"}, {"api_name": "tvm.autotvm.task.create", "line_number": 378, "usage_type": "call"}, {"api_name": "tvm.autotvm.task", "line_number": 378, "usage_type": "attribute"}, {"api_name": "tvm.autotvm", "line_number": 378, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 382, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 382, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 383, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 383, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 383, "usage_type": "attribute"}, {"api_name": "tvm.autotvm.measure_option", "line_number": 385, "usage_type": "call"}, {"api_name": "tvm.autotvm", "line_number": 385, "usage_type": "name"}, {"api_name": "tvm.autotvm.LocalBuilder", "line_number": 385, "usage_type": "call"}, {"api_name": "tvm.autotvm.LocalRunner", "line_number": 385, "usage_type": "call"}, {"api_name": "tvm.autotvm.tuner.RandomTuner", "line_number": 387, "usage_type": "call"}, {"api_name": "tvm.autotvm.tuner", "line_number": 387, "usage_type": "attribute"}, {"api_name": "tvm.autotvm", "line_number": 387, "usage_type": "name"}, {"api_name": "tvm.autotvm.callback.progress_bar", "line_number": 396, "usage_type": "call"}, {"api_name": "tvm.autotvm.callback", "line_number": 396, "usage_type": "attribute"}, {"api_name": "tvm.autotvm", "line_number": 396, "usage_type": "name"}, {"api_name": "tvm.autotvm.callback.log_to_file", "line_number": 398, "usage_type": "call"}, {"api_name": "tvm.autotvm.callback", "line_number": 398, "usage_type": "attribute"}, {"api_name": "tvm.autotvm", "line_number": 398, "usage_type": "name"}, {"api_name": "tvm.autotvm.apply_history_best", "line_number": 399, "usage_type": "call"}, {"api_name": "tvm.autotvm", "line_number": 399, "usage_type": "name"}, {"api_name": "tvm.target.create", "line_number": 400, "usage_type": "call"}, {"api_name": "tvm.target", "line_number": 400, "usage_type": "attribute"}, {"api_name": "tvm.context", "line_number": 408, "usage_type": "call"}, {"api_name": "tvm.nd.array", "line_number": 409, "usage_type": 
"call"}, {"api_name": "tvm.nd", "line_number": 409, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 409, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 409, "usage_type": "call"}, {"api_name": "tvm.nd.array", "line_number": 410, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 410, "usage_type": "attribute"}, {"api_name": "tvm.nd.array", "line_number": 411, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 411, "usage_type": "attribute"}, {"api_name": "tvm.build", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 417, "usage_type": "attribute"}, {"api_name": "numpy.prod", "line_number": 421, "usage_type": "call"}, {"api_name": "topi.util.get_const_tuple", "line_number": 421, "usage_type": "call"}]} +{"seq_id": "610794209", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow, information):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(1029, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.splitter = QtWidgets.QSplitter(self.centralwidget)\r\n self.splitter.setOrientation(QtCore.Qt.Horizontal)\r\n self.splitter.setObjectName(\"splitter\")\r\n\r\n self.vtk_panel = QtWidgets.QFrame(self.splitter)\r\n self.vtk_panel.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.vtk_panel.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.vtk_panel.setObjectName(\"vtk_panel\")\r\n self.horizontalLayout.addWidget(self.splitter)\r\n\r\n self.frame = QtWidgets.QFrame(self.splitter)\r\n self.frame.setLayoutDirection(QtCore.Qt.LeftToRight)\r\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.frame.setObjectName(\"frame\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout(self.frame)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n\r\n self.label = QtWidgets.QLabel(self.frame)\r\n self.label.setObjectName(\"label\")\r\n self.verticalLayout.addWidget(self.label)\r\n\r\n self.label_2 = QtWidgets.QLabel(self.frame)\r\n self.label_2.setObjectName(\"label_2\")\r\n self.verticalLayout.addWidget(self.label_2)\r\n\r\n self.label_3 = QtWidgets.QLabel(self.frame)\r\n self.label_3.setObjectName(\"label_3\")\r\n self.verticalLayout.addWidget(self.label_3)\r\n\r\n self.label_4 = QtWidgets.QLabel(self.frame)\r\n self.label_4.setObjectName(\"label_4\")\r\n self.verticalLayout.addWidget(self.label_4)\r\n\r\n self.label_5 = QtWidgets.QLabel(self.frame)\r\n self.label_5.setObjectName(\"label_5\")\r\n self.verticalLayout.addWidget(self.label_5)\r\n\r\n self.label_6 = QtWidgets.QLabel(self.frame)\r\n self.label_6.setObjectName(\"label_6\")\r\n self.verticalLayout.addWidget(self.label_6)\r\n\r\n self.label_7 = QtWidgets.QLabel(self.frame)\r\n self.label_7.setObjectName(\"label_7\")\r\n self.verticalLayout.addWidget(self.label_7)\r\n\r\n self.label_8 = QtWidgets.QLabel(self.frame)\r\n self.label_8.setObjectName(\"label_8\")\r\n self.verticalLayout.addWidget(self.label_8)\r\n\r\n self.label_9 = QtWidgets.QLabel(self.frame)\r\n self.label_9.setObjectName(\"label_9\")\r\n self.verticalLayout.addWidget(self.label_9)\r\n\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n 
self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1029, 23))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n\r\n self.retranslateUiwithINfor(MainWindow,information)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"GlyphViewer: \"))\r\n self.label.setText(_translate(\"MainWindow\", \"PatientID: \"))\r\n self.label_2.setText(_translate(\"MainWindow\", \"PatientName: \"))\r\n self.label_3.setText(_translate(\"MainWindow\", \"PatientBirthDate: \"))\r\n self.label_4.setText(_translate(\"MainWindow\", \"PatientSex: \"))\r\n self.label_5.setText(_translate(\"MainWindow\", \"StudyID: \"))\r\n self.label_6.setText(_translate(\"MainWindow\", \"StudyDate: \"))\r\n self.label_7.setText(_translate(\"MainWindow\", \"StudyTime: \"))\r\n self.label_8.setText(_translate(\"MainWindow\", \"InstitutionName: \"))\r\n self.label_9.setText(_translate(\"MainWindow\", \"Manufacturer: \"))\r\n\r\n def retranslateUiwithINfor(self, MainWindow, information):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"GlyphViewer: \"))\r\n self.label.setText(_translate(\"MainWindow\", \"PatientID: \"+ information['PatientID']))\r\n self.label_2.setText(_translate(\"MainWindow\", \"PatientName: \"+ str(information['PatientName'])))\r\n self.label_3.setText(_translate(\"MainWindow\", \"PatientBirthDate: \"+ information['PatientBirthDate']))\r\n self.label_4.setText(_translate(\"MainWindow\", \"PatientSex: \"+ information['PatientSex']))\r\n self.label_5.setText(_translate(\"MainWindow\", \"StudyID: \"+ information['StudyID']))\r\n self.label_6.setText(_translate(\"MainWindow\", \"StudyDate: \"+ information['StudyDate']))\r\n self.label_7.setText(_translate(\"MainWindow\", \"StudyTime: \"+ information['StudyTime']))\r\n self.label_8.setText(_translate(\"MainWindow\", \"InstitutionName: \"+ information['InstitutionName']))\r\n self.label_9.setText(_translate(\"MainWindow\", \"Manufacturer: \"+ information['Manufacturer']))\r\n\r\n\r\n# if __name__==\"__main__\":\r\n# infor = {'PatientID': 'AW959532241.111.1212659242', 'PatientName': '01^AC', 'PatientBirthDate': '',\r\n# 'PatientSex': 'M',\r\n# 'StudyID': '', 'StudyDate': '20080605', 'StudyTime': '111034.203000', 'InstitutionName': 'CHU de Rouen',\r\n# 'Manufacturer': 'SIEMENS'}\r\n# import sys\r\n# app=QtWidgets.QApplication(sys.argv)\r\n# widget=QtWidgets.QMainWindow()\r\n# ui=Ui_MainWindow()\r\n# ui.setupUi(widget, infor)\r\n# widget.show()\r\n# sys.exit(app.exec_())", "sub_path": "dicomVis/dicomVis/dicomView.py", "file_name": "dicomView.py", "file_ext": "py", "file_size_in_byte": 5781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 7, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QSplitter", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 11, 
"usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 12, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 17, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 33, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 45, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 49, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 57, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMenuBar", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QStatusBar", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 70, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 76, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 76, "usage_type": "name"}, 
{"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 79, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 79, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 92, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "356867299", "text": "import urllib.request\nfrom lxml import etree\n\nfrom xml.dom import minidom\n\ndef getProductAttribs(url):\n new_request = urllib.request.Request(url)\n new_response = urllib.request.urlopen(new_request)\n new_html_code = new_response.read().decode('utf-8')\n new_tree = etree.HTML(new_html_code)\n\n name = (new_tree.xpath('.//span[@itemprop=\"name\"]/text()')).__str__()\n price = new_tree.xpath('.//div[@id=\"fix_price\"]/text()').__str__()\n main_active = (new_tree.xpath('//*[starts-with(text(),\"активн\")]/..//text()')).__str__()\n other_active = (new_tree.xpath('//*[preceding::i[starts-with(.,\"активн\")]][following::i[starts-with(.,\"вспомог\")]]/text()')).__str__()\n\n main_active.replace(\"\\\\\\\\xa0\", \"\")\n main_active.replace(\"\\\\xa0\", \"\" ).replace(\"['\", \"\").replace(\"']\", \"\")\n other_active.replace(\"\\\\\\\\xa0\", \"\")\n other_active.replace(\"\\\\xa0\", \"\" ).replace(\"['\", \"\").replace(\"']\", \"\")\n\n #Создаем элемент XML файла product\n doc = minidom.Document()\n #product\n productXML = doc.createElement('product')\n\n #url\n urlXML = doc.createElement('url')\n url_text = doc.createTextNode(url)\n urlXML.appendChild(url_text)\n\n productXML.appendChild(urlXML)\n\n #name\n nameXML = doc.createElement('name')\n name_text = doc.createTextNode(name)\n nameXML.appendChild(name_text)\n\n productXML.appendChild(nameXML)\n \n #price\n priceXML = doc.createElement('price')\n price_text = doc.createTextNode(price)\n priceXML.appendChild(price_text)\n\n productXML.appendChild(priceXML)\n\n #active\n activeXML = doc.createElement('active')\n active_text = doc.createTextNode(main_active + ',' + other_active)\n activeXML.appendChild(active_text)\n\n productXML.appendChild(activeXML)\n\n return productXML\n\n\ndef getAllProducts (url):\n max_pages = 0\n request = urllib.request.Request(url)\n response = urllib.request.urlopen(request)\n html_code = response.read().decode('utf-8')\n tree = etree.HTML(html_code)\n nodes = tree.xpath('.//div[@class=\"links\"]')\n i = 0\n\n count_divs_a = tree.xpath('count(.//div[@class=\"links\"]/a/text())')\n\n while i < count_divs_a :\n \n a_text = tree.xpath('.//div[@class=\"links\"]/a/text()')[i]\n\n if a_text == \">\":\n prev_page = tree.xpath('.//div[@class=\"links\"]/a/text()')[i-1]\n #there is max_pages \n max_pages = int(prev_page)\n print ('max_pages=' ,max_pages)\n i = i+1\n\n cut_url = url[:-1]\n\n #Теперь относительно всех страниц\n count_all_pages = 0\n k = 1\n while k < max_pages+1:\n new_url = cut_url + str(k)\n print ('new_url=', new_url)\n new_request = urllib.request.Request(new_url)\n new_response = urllib.request.urlopen(new_request)\n new_html_code = new_response.read().decode('utf-8')\n new_tree = etree.HTML(new_html_code)\n print ('page=', k)\n m = 0\n while m < new_tree.xpath('count(.//div[@class=\"lotImage\"]/a)'):\n print (new_tree.xpath('.//a[@class=\"clickbleLink\"]')[m].get('href'))\n productsXML.appendChild(getProductAttribs((new_tree.xpath('.//a[@class=\"clickbleLink\"]')[m].get('href'))))\n m = m +1\n count_all_pages = count_all_pages + 1\n k = k + 1\n print (count_all_pages)\n\n\n\n#max_pages = 0\n\n\n#Cоздаем корневой элемент главной XML-ины\ndoc = 
minidom.Document()\n\n#products\nproductsXML = doc.createElement('products')\ndoc.appendChild(productsXML)\n\n#url = 'http://biosfera.kz/product/category?path=13_451&page=1'\nurl = 'http://biosfera.kz/product/category?path=13'\n#getAllProducts(url)\n\nrequest = urllib.request.Request(url)\nresponse = urllib.request.urlopen(request)\nhtml_code = response.read().decode('utf-8')\nmain_tree = etree.HTML(html_code)\n\nn=0\n\ncount_categories = main_tree.xpath('count(.//div[@class=\"sectionsList\"]/a)')\n\nwhile n < (count_categories ) :\n if n != count_categories - 1:\n prev_page = main_tree.xpath('.//div[@class=\"sectionsList\"]/a')[n].get('href')\n prev_page = prev_page + '&page=1'\n print(prev_page)\n getAllProducts(prev_page) \n n = n + 1\n else:\n prev_page = main_tree.xpath('.//div[@class=\"sectionsList\"]/a')[n].get('href')\n prev_page = prev_page + '?page=1'\n print(prev_page)\n getAllProducts(prev_page) \n n = n + 1\n \n#prev_page = main_tree.xpath('.//div[@class=\"sectionsList\"]/a/text()')\n\nxml_str = doc.toprettyxml(indent=\" \")\nwith open(\"minidom_example.xml\", \"w\") as f:\n f.write(xml_str)\n", "sub_path": "parser_v3.py", "file_name": "parser_v3.py", "file_ext": "py", "file_size_in_byte": 4766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "urllib.request.request.Request", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 7, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 7, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 8, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 8, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 10, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 10, "usage_type": "name"}, {"api_name": "xml.dom.minidom.Document", "line_number": 23, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 23, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 60, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 60, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 61, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 61, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 63, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 63, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 88, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 88, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 88, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 89, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 89, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 89, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 91, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 91, "usage_type": "name"}, {"api_name": "xml.dom.minidom.Document", "line_number": 108, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 108, 
"usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 118, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 118, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 118, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 119, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 119, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 119, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 121, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "340629130", "text": "# Tests specific to the dask class\n\nimport os\nimport pytest\n\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy import units as u\n\ntry:\n from distributed.utils_test import client, loop, cluster_fixture # noqa\n DISTRIBUTED_INSTALLED = True\nexcept ImportError:\n DISTRIBUTED_INSTALLED = False\n\nfrom spectral_cube import DaskSpectralCube\nfrom .test_casafuncs import make_casa_testimage\n\ntry:\n import casatools\n from casatools import image\n CASA_INSTALLED = True\nexcept ImportError:\n try:\n from taskinit import ia as image\n CASA_INSTALLED = True\n except ImportError:\n CASA_INSTALLED = False\n\nDATA = os.path.join(os.path.dirname(__file__), 'data')\n\n\nclass Array:\n\n args = None\n kwargs = None\n\n def compute(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n\ndef test_scheduler(data_adv):\n\n cube = DaskSpectralCube.read(data_adv)\n fake_array = Array()\n\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'synchronous'}\n\n with cube.use_dask_scheduler('threads'):\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'threads'}\n\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'synchronous'}\n\n cube.use_dask_scheduler('threads')\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'threads'}\n\n with cube.use_dask_scheduler('processes', num_workers=4):\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'processes', 'num_workers': 4}\n\n cube._compute(fake_array)\n assert fake_array.kwargs == {'scheduler': 'threads'}\n\n\ndef test_save_to_tmp_dir(data_adv):\n pytest.importorskip('zarr')\n cube = DaskSpectralCube.read(data_adv)\n cube_new = cube.sigma_clip_spectrally(3, save_to_tmp_dir=True)\n # The following test won't necessarily always work in future since the name\n # is not really guaranteed, but this is pragmatic enough for now\n assert cube_new._data.name.startswith('from-zarr')\n\n\ndef test_rechunk(data_adv):\n cube = DaskSpectralCube.read(data_adv)\n assert cube._data.chunksize == (4, 3, 2)\n cube_new = cube.rechunk(chunks=(1, 2, 3))\n # note last element is 2 because the chunk size we asked for\n # is larger than cube - this is fine and deliberate in this test\n assert cube_new._data.chunksize == (1, 2, 2)\n\n\ndef test_statistics(data_adv):\n cube = DaskSpectralCube.read(data_adv).rechunk(chunks=(1, 2, 3))\n stats = cube.statistics()\n assert_quantity_allclose(stats['npts'], 24)\n assert_quantity_allclose(stats['mean'], 0.4941651776136591 * u.K)\n assert_quantity_allclose(stats['sigma'], 0.3021908870982011 * u.K)\n assert_quantity_allclose(stats['sum'], 11.85996426272782 * u.K)\n assert_quantity_allclose(stats['sumsq'], 7.961125988022091 * u.K ** 2)\n 
assert_quantity_allclose(stats['min'], 0.0363300285196364 * u.K)\n assert_quantity_allclose(stats['max'], 0.9662900439556562 * u.K)\n assert_quantity_allclose(stats['rms'], 0.5759458158839716 * u.K)\n\n\n@pytest.mark.skipif(not CASA_INSTALLED, reason='Requires CASA to be installed')\ndef test_statistics_consistency_casa(data_adv, tmp_path):\n\n # Similar to test_statistics but compares to CASA directly.\n\n cube = DaskSpectralCube.read(data_adv)\n stats = cube.statistics()\n\n make_casa_testimage(data_adv, tmp_path / 'casa.image')\n\n ia = casatools.image()\n ia.open(str(tmp_path / 'casa.image'))\n stats_casa = ia.statistics()\n ia.close()\n\n for key in stats:\n if isinstance(stats[key], u.Quantity):\n value = stats[key].value\n else:\n value = stats[key]\n assert_allclose(value, stats_casa[key])\n\n\nif DISTRIBUTED_INSTALLED:\n\n def test_dask_distributed(client, tmpdir): # noqa\n\n # Make sure that we can use dask distributed. This is a regression test for\n # a bug caused by FilledArrayHandler not being serializable.\n\n cube = DaskSpectralCube.read(os.path.join(DATA, 'basic.image'))\n cube.use_dask_scheduler(client)\n\n cube.sigma_clip_spectrally(2, save_to_tmp_dir=tmpdir.strpath)\n", "sub_path": "spectral_cube/tests/test_dask.py", "file_name": "test_dask.py", "file_ext": "py", "file_size_in_byte": 4195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 45, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 45, "usage_type": "name"}, {"api_name": "pytest.importorskip", "line_number": 71, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 72, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 72, "usage_type": "name"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 80, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 80, "usage_type": "name"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 89, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 89, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 91, "usage_type": "call"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 92, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 92, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 92, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 93, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 93, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 93, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 94, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 94, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 94, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 95, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 95, "usage_type": "attribute"}, {"api_name": "astropy.units", 
"line_number": 95, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 96, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 96, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 96, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 97, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 97, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 97, "usage_type": "name"}, {"api_name": "astropy.tests.helper.assert_quantity_allclose", "line_number": 98, "usage_type": "call"}, {"api_name": "astropy.units.K", "line_number": 98, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 98, "usage_type": "name"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 106, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 106, "usage_type": "name"}, {"api_name": "test_casafuncs.make_casa_testimage", "line_number": 109, "usage_type": "call"}, {"api_name": "casatools.image", "line_number": 111, "usage_type": "call"}, {"api_name": "astropy.units.Quantity", "line_number": 117, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 121, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 101, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 101, "usage_type": "attribute"}, {"api_name": "spectral_cube.DaskSpectralCube.read", "line_number": 131, "usage_type": "call"}, {"api_name": "spectral_cube.DaskSpectralCube", "line_number": 131, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "distributed.utils_test.client", "line_number": 132, "usage_type": "argument"}]} +{"seq_id": "378062048", "text": "import os\r\nimport json\r\nimport datetime\r\n\r\nfrom flask import Flask, request\r\nfrom dotenv import load_dotenv\r\nimport requests\r\nimport pandas as pd\r\n\r\nimport tokuchogo\r\nimport preprocessing\r\nimport analyzegraph\r\nimport Summon_chiwawa\r\nimport chiwawa_status\r\nimport LDAcooking2\r\n\r\n\r\ndef getDemoDate():\r\n global COUNTER\r\n if COUNTER == 0:\r\n date = datetime.datetime.strptime(\"2018-10-19\",\"%Y-%m-%d\").date()\r\n elif COUNTER == 1:\r\n date = datetime.datetime.strptime(\"2018-10-26\",\"%Y-%m-%d\").date()\r\n elif COUNTER == 2:\r\n date = datetime.datetime.strptime(\"2018-11-02\",\"%Y-%m-%d\").date()\r\n else:\r\n date = datetime.datetime.strptime(\"2018-11-02\",\"%Y-%m-%d\").date()\r\n return date\r\ndef getDemoData():\r\n global COUNTER\r\n if COUNTER == 0:\r\n data = preprocessing.readData(\"data/data_master_2018-06.csv\")\r\n COUNTER += 1\r\n elif COUNTER == 1:\r\n data = preprocessing.readData(\"data/data_master_2018-07.csv\")\r\n COUNTER += 1\r\n elif COUNTER == 2:\r\n data = preprocessing.readData(\"data/data_master_2018-10.csv\")\r\n COUNTER += 1\r\n else:\r\n data = preprocessing.readData(\"data/data_master_2018-10.csv\")\r\n COUNTER += 1\r\n return data\r\n\r\napp = Flask(__name__)\r\nload_dotenv('.env')\r\nenv = os.environ\r\nCOUNTER = 0\r\n\r\n# 先ほど作成した、Hello, world!\r\n@app.route('/')\r\ndef hello_world():\r\n return 'Hello, World!'\r\n\r\n\r\n# 知話輪サーバーからのWebhookを受け取るエンドポイント\r\n@app.route('/message', methods=['POST'])\r\ndef messages():\r\n if 
is_request_valid(request):\r\n body = request.get_json(silent=True)\r\n companyId = body['companyId']\r\n msgObj = body['message']\r\n groupId = msgObj['groupId']\r\n \r\n msgTextHead = msgObj['text'].split()[0]\r\n if '今週のチワワ' in msgTextHead:\r\n dt_now = getDemoDate() #datetime.datetime.now().date()\r\n now_data = getDemoData() # getTodayMessage()\r\n messageText = makeMessage(now_data, dt_now)\r\n send_message(companyId, groupId, messageText)\r\n return \"OK\"\r\n else:\r\n return \"request is not valid.\"\r\n\r\n# 検証トークンを用いて、リクエスト送信元が正しいか検証する\r\ndef is_request_valid(request):\r\n validationToken = env['CHIWAWA_VALIDATION_TOKEN']\r\n requestToken = request.headers['X-Chiwawa-Webhook-Token']\r\n return validationToken == requestToken\r\n\r\n# 知話輪APIを用いて、サーバにメッセージを送信する\r\ndef send_message(companyId, groupId, message):\r\n url = 'https://{0}.chiwawa.one/api/public/v1/groups/{1}/messages'.format(companyId, groupId)\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'X-Chiwawa-API-Token': env['CHIWAWA_API_TOKEN']\r\n }\r\n content = {\r\n 'text': message\r\n }\r\n requests.post(url, headers=headers, data=json.dumps(content))\r\n\r\ndef get_today_message(companyId, groupId):\r\n now = datetime.datetime.now()\r\n senshu = now - datetime.timedelta(weeks=1)\r\n now_time = int(senshu.timestamp()*1000)\r\n url = 'https://{0}.chiwawa.one/api/public/v1/groups/{1}/messages'\\\r\n '?maxResults=100&createdAtFrom={2}'.format(companyId, groupId, now_time)\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'X-Chiwawa-API-Token': env['CHIWAWA_API_TOKEN']\r\n }\r\n response = requests.get(url, headers=headers)\r\n response = response.json()['messages']\r\n return response\r\n\r\ndef makeMessage(now_data, dt_now):\r\n mention_matrix = analyzegraph.countMention(now_data)\r\n graph_data = analyzegraph.graph_features(mention_matrix)\r\n pagerank = pd.DataFrame(list(graph_data[0].items()),columns=['user','now_eikyoudo'])\r\n tokuchogo = getTokuchogo(now_data.merge(pagerank))\r\n kako_wadai = LDAcooking2.LDA_cooking()\r\n level = chiwawa_status.chiwawa_score(now_data, graph_data[2],tokuchogo, dt_now)\r\n now_date = dt_now.strftime(\"%Y年%m月%d日\")\r\n messageText = \"----{5} 今週のチワワ----\\n\\\r\n今週もお疲れさまでした!\\n\\\r\n今週の皆さんの会話を聞いていたチワワの名前は……\\n\\\r\n「{0}チワワ」になりました。\\n\\\r\n同じ話題が過去に出てきたのは{1}でした。\\n\\\r\nその時のチャットは「{2}」のような感じでした。\\n\\\r\n{0}チワワのレベルは{3}になりました。\\n\\\r\n{4}\\\r\n来週も頑張りましょう!\".format(tokuchogo,kako_wadai[1],kako_wadai[0],level,Summon_chiwawa.summon(level), now_date)\r\n return messageText\r\n\r\ndef getTokuchogo(dataFrame):\r\n docs = dataFrame[\"tango\"].to_list()\r\n reaction = tokuchogo.min_max(dataFrame[\"reaction\"].to_numpy())\r\n eikyoudo = dataFrame[\"now_eikyoudo\"].to_numpy()\r\n weights = reaction*eikyoudo\r\n weighted_tokuchogo = tokuchogo.extractTokuchoWord(docs,weights)\r\n return weighted_tokuchogo\r\n\r\n\r\nif __name__ == \"__main__\":\r\n now_data = preprocessing.readData(\"data/data_master_2018-10.csv\")\r\n now_date = datetime.datetime.strptime(\"2018-10-19\",\"%Y-%m-%d\").date()\r\n print(makeMessage(now_data, now_date))\r\n # companyId = \"\"\r\n # groupId = \"\"\r\n # print(get_today_message(companyId, groupId))\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": 
"attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "preprocessing.readData", "line_number": 32, "usage_type": "call"}, {"api_name": "preprocessing.readData", "line_number": 35, "usage_type": "call"}, {"api_name": "preprocessing.readData", "line_number": 38, "usage_type": "call"}, {"api_name": "preprocessing.readData", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 45, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "argument"}, {"api_name": "flask.request.get_json", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 91, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 95, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 103, "usage_type": "call"}, {"api_name": "analyzegraph.countMention", "line_number": 108, "usage_type": "call"}, {"api_name": "analyzegraph.graph_features", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "LDAcooking2.LDA_cooking", "line_number": 112, "usage_type": "call"}, {"api_name": "chiwawa_status.chiwawa_score", "line_number": 113, "usage_type": "call"}, {"api_name": "Summon_chiwawa.summon", "line_number": 123, "usage_type": "call"}, {"api_name": "tokuchogo.min_max", "line_number": 128, "usage_type": "call"}, {"api_name": "tokuchogo.extractTokuchoWord", "line_number": 131, "usage_type": "call"}, {"api_name": "preprocessing.readData", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "attribute"}]} +{"seq_id": "143571678", "text": "import logging\nfrom logging.handlers import TimedRotatingFileHandler\n\ndef is_ascii(s):\n return all(ord(c) < 128 for c in s)\n\nclass MyFilter(logging.Filter):\n\n def filter(self, record: logging.LogRecord) -> bool:\n return is_ascii(record.getMessage())\n\n# def filter_asci(record: logging.LogRecord) -> bool:\n# return is_ascii(record.getMessage())\n\n\nclass SpecialFileHandler(TimedRotatingFileHandler):\n\n def emit(self, record):\n if not record.levelno == self.level:\n return\n super().emit(record)\n\n\ndict_config = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"base\": {\n \"format\": \"%(levelname)s | %(name)s | %(asctime)s | %(lineno)d | %(message)s\"\n 
}\n },\n \"filters\": {\n \"myfilter\": {\n \"()\": MyFilter,\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"base\",\n 'filters': ['myfilter']\n },\n \"file_info\": {\n \"()\": SpecialFileHandler,\n \"level\": \"INFO\",\n \"formatter\": \"base\",\n \"filename\": \"info.log\",\n \"when\": \"h\",\n \"interval\": 10,\n \"backupCount\": 1,\n 'filters': ['myfilter']\n },\n \"file_debug\": {\n \"()\": SpecialFileHandler,\n \"level\": \"DEBUG\",\n \"formatter\": \"base\",\n \"filename\": \"debug.log\",\n \"when\": \"h\",\n \"interval\": 10,\n \"backupCount\": 1,\n 'filters': ['myfilter']\n },\n \"file_exception\": {\n \"()\": SpecialFileHandler,\n \"level\": \"ERROR\",\n \"formatter\": \"base\",\n \"filename\": \"exception.log\",\n \"when\": \"h\",\n \"interval\": 10,\n \"backupCount\": 1,\n 'filters': ['myfilter']\n }\n },\n \"loggers\": {\n \"module_logger\": {\n \"lvl\": \"DEBUG\",\n \"handlers\": [\"file_info\", \"file_exception\", \"file_debug\", \"console\"],\n # \"propagate\": False,\n }\n },\n # \"filters\": [\"filter_asci\"]\n # \"root\": {} # == \"\": {}\n}\n", "sub_path": "logging_to_different_files/logging_config.py", "file_name": "logging_config.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.Filter", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.LogRecord", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "534990951", "text": "\"\"\"\nCreate an authorisation file for the GitHub API.\n\"\"\"\nfrom .utils import _GITHUB_AUTH_PATH\nfrom .utils import _get_input\nfrom .utils import get_username\nfrom .utils import log\n\nWARNING_MESSAGE = \"\"\"\nYour username and personal access token are saved in plain text in the\nfile that is created. You should set the file permissions provided by \nyour operating system to ensure that your GitHub credentials are safe.\n\"\"\"\n\n\ndef authorise(username=None, token=None):\n \"\"\"Create an authorisation file for the GitHub API.\n\n When requesting information about the MSL repositories_ that are\n available on GitHub there is a limit to how often you can send\n requests to the GitHub API. If you have a GitHub account and\n include your username and a `personal access token`_ with each\n request then this limit is increased.\n\n .. important::\n\n Calling this function will create a file that contains your GitHub\n username and a `personal access token`_ so that GitHub requests are\n authorised. Your username and `personal access token`_ are saved in\n plain text in the file that is created. You should set the file\n permissions provided by your operating system to ensure that your\n GitHub credentials are safe.\n\n .. versionadded:: 2.3.0\n\n .. versionchanged:: 2.4.0\n Renamed the `password` keyword argument to `token`.\n\n .. versionchanged:: 2.5.0\n Renamed function to `authorise`.\n\n .. _repositories: https://github.com/MSLNZ\n .. _personal access token: https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token\n\n Parameters\n ----------\n username : :class:`str`, optional\n The GitHub username. If :data:`None` then you will be\n asked for the `username`.\n token : :class:`str`, optional\n A GitHub `personal access token`_ for `username`. 
If :data:`None`\n then you will be asked for the `token`.\n \"\"\"\n if username is None:\n default = get_username()\n try:\n username = _get_input('Enter your GitHub username [default: {}]: '.format(default))\n except KeyboardInterrupt:\n log.warning('\\nDid not create the GitHub authorisation file')\n return\n else:\n if not username:\n username = default\n\n if token is None:\n try:\n token = _get_input('Enter your GitHub personal access token: ')\n except KeyboardInterrupt:\n log.warning('\\nDid not create the GitHub authorisation file')\n return\n\n if not username:\n log.warning('You must enter a username. Did not create the GitHub authorisation file')\n return\n\n if not token:\n log.warning('You must enter a personal access token. Did not create the GitHub authorisation file')\n return\n\n with open(_GITHUB_AUTH_PATH, mode='wt') as fp:\n fp.write(username + ':' + token)\n\n log.warning(WARNING_MESSAGE)\n log.info('GitHub credentials were saved to %s', _GITHUB_AUTH_PATH)\n", "sub_path": "msl/package_manager/authorise.py", "file_name": "authorise.py", "file_ext": "py", "file_size_in_byte": 3031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "utils.get_username", "line_number": 55, "usage_type": "call"}, {"api_name": "utils._get_input", "line_number": 57, "usage_type": "call"}, {"api_name": "utils.log.warning", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.log", "line_number": 59, "usage_type": "name"}, {"api_name": "utils._get_input", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.log.warning", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.log", "line_number": 69, "usage_type": "name"}, {"api_name": "utils.log.warning", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.log", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.log.warning", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.log", "line_number": 77, "usage_type": "name"}, {"api_name": "utils._GITHUB_AUTH_PATH", "line_number": 80, "usage_type": "argument"}, {"api_name": "utils.log.warning", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.log", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.log.info", "line_number": 84, "usage_type": "call"}, {"api_name": "utils._GITHUB_AUTH_PATH", "line_number": 84, "usage_type": "argument"}, {"api_name": "utils.log", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "18948053", "text": "import csv\nfrom matplotlib import pyplot\nfrom datetime import datetime\n\nfilename = \"sitka_weather_2014.csv\"\nwith open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n dates,highs,lows = [],[],[]\n for row in reader:\n dates.append(datetime.strptime(row[0], \"%Y-%m-%d\"))\n highs.append(int(row[1]))\n lows.append(int(row[3]))\n\nfor index, column_header in enumerate(header_row):\n print(index, column_header)\n\nfig = pyplot.figure(dpi = 128, figsize = (10, 6))\npyplot.plot(dates, highs, c = \"red\",linewidth = 1,alpha = 0.5)\npyplot.plot(dates, lows, c = \"blue\",linewidth = 1,alpha = 0.5)\npyplot.fill_between(dates,highs,lows,facecolor='blue',alpha=0.1)\n\npyplot.title(\"Daily high temperatures, 2014\", fontsize=10)\npyplot.xlabel('', fontsize=10)\nfig.autofmt_xdate()\npyplot.ylabel(\"Temperature (F)\", fontsize=10)\npyplot.tick_params(axis='both', which='major', labelsize=10)\n\npyplot.show()\n", "sub_path": "CSV/highs_lows.py", "file_name": "highs_lows.py", "file_ext": "py", 
"file_size_in_byte": 933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "csv.reader", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "614664836", "text": "import cv2\nfrom datetime import time, timedelta, datetime, date\nimport math\n\nvideo_location = 'D:/VOP_scenarios/scenarios/videos_0905/gilles_video.mp4'\n\nframe_location = 'D:/test/'\nsave_location = 'D:/VOP_scenarios/scenarios/2_personen/rgb_frames/'\n\nvideo_start = datetime.combine(date.today(), time(hour=14, minute=3, second=34))\nvideo_stop = datetime.combine(date.today(), time(hour=15, minute=44, second=28))\n\nscene_start = datetime.combine(date.today(), time(hour=15, minute=0, second=0))\nscene_stop = datetime.combine(date.today(), time(hour=15, minute=21, second=0))\n\nfps = 28.86\nframe_margin = 100\n\n\nscene_start_frame = int((scene_start - video_start).seconds * fps - frame_margin)\nscene_stop_frame = int((scene_stop - video_start).seconds * fps + frame_margin)\nscene_frame_amount = scene_stop_frame - scene_start_frame\n\ncap = cv2.VideoCapture(video_location)\n\ni = 0\ncap.set(1,scene_start_frame)\n\nfor i in range(scene_frame_amount // 2):\n _, _ = cap.read()\n ret, frame = cap.read()\n img_name = save_location + ('000000' + str(i))[-6:] + '.png'\n print(img_name)\n cv2.imwrite(img_name, frame)\n if ret == False:\n print('Doesn\\'t work')\n break\n\n\ncap.release()\ncv2.destroyAllWindows()", "sub_path": "server/scripts/auto_cut_video_gilles.py", "file_name": "auto_cut_video_gilles.py", "file_ext": "py", "file_size_in_byte": 1218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datetime.datetime.combine", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 10, "usage_type": "call"}, 
{"api_name": "datetime.date", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "336100778", "text": "from utils import handle_reaction\n\nclass AgressiveStrategy:\n hit_table = [\n \"Don't you have anything better to do ?\",\n \"Stop it now !\",\n \"I want to hit you !\",\n \"Son of a glitch !\",\n \"Okay, i'm going out to piss you off !\"\n ]\n joke_table = [\n \"Hahahahaha\",\n \"Hohahaha\",\n \"Whahahaha\",\n \"NYAHAHAHA\",\n ]\n look_table = [\n \"What do you look ?\",\n \"What are you doing !\",\n \"Get lost !\"\n ]\n\n def __init__(self, emot_ai):\n self.emot_ai = emot_ai\n self.is_personality = {\n \"hit\": emot_ai.personality[\"sensible\"] and emot_ai.personality[\"impulsive\"] and emot_ai.personality[\"pessimistic\"] and emot_ai.personality[\"demonstrative\"],\n \"joke\": emot_ai.personality[\"impulsive\"] and emot_ai.personality[\"demonstrative\"] and emot_ai.personality[\"extrovert\"],\n \"look\": emot_ai.personality[\"impulsive\"]\n }\n\n def hit(self):\n handle_reaction(self.emot_ai, self.hit_table, \"hit\")\n should_continue = not self.emot_ai.actions_memory.count_value(\"hit\") > len(self.hit_table) - 1\n return should_continue\n\n def joke(self):\n handle_reaction(self.emot_ai, self.joke_table, \"joke\")\n return True\n\n def look(self):\n handle_reaction(self.emot_ai, self.look_table, \"look\")\n return True", "sub_path": "ReactionStrategies/AgressiveStrategy.py", "file_name": "AgressiveStrategy.py", "file_ext": "py", "file_size_in_byte": 1367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "utils.handle_reaction", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.handle_reaction", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.handle_reaction", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "1082561", "text": "from selenium import webdriver\nbrowser = webdriver.Chrome()\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nimport os.path as osp\nimport random\n# browser.switch_to.alert.accept()\n# browser.add_cookie(\n# {'name': 
'Hm_lvt_b47523672ea7966981f87de0f8661ca4', 'value': 1523956436})\n# browser.add_cookie(\n# {'name': 'Hm_lpvt_b47523672ea7966981f87de0f8661ca4', 'value': 1523956612})\nbrowser.get('http://data.xinxueshuo.cn/nsi/user/login.html')\n\n\ndef indexGen(i):\n if len(str(i)) == 1:\n return '000' + str(i)\n elif len(str(i)) == 2:\n return '00' + str(i)\n elif len(str(i)) == 3:\n return '0' + str(i)\n else:\n return str(i)\n\n\nsleep(15)\nfor i in range(1140):\n url = \"http://data.xinxueshuo.cn/nsi/school/detail.html?School_name=10\" + \\\n indexGen(i + 1) + '&whereFrom=search'\n filename = indexGen(i + 1) + '.html'\n browser.get(url)\n html = BeautifulSoup(browser.page_source, 'lxml')\n html = html.prettify()\n try:\n html = html.encode('gbk', 'ignore').decode('gbk', 'ignore')\n except UnicodeError:\n pass\n with open(osp.join('C:\\\\Users\\\\K\\\\Desktop\\\\NSIdata\\\\html', filename), 'w') as f:\n f.write(html)\n sleep(1 + random.random() * 2)\n", "sub_path": "spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 1227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 2, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 2, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "random.random", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "78380731", "text": "from sqlalchemy import create_engine, types\nfrom siuba.sql import LazyTbl, collect\nfrom siuba.dply.verbs import ungroup\nfrom pandas.testing import assert_frame_equal\nimport pandas as pd\nimport os\nimport numpy as np\n\ndef data_frame(*args, _index = None, **kwargs):\n if len(args):\n raise NotImplementedError(\"all arguments to data_frame must be named\")\n\n all_scalars = all(not np.ndim(v) for v in kwargs.values())\n\n if all_scalars:\n fixed = {k: [v] for k,v in kwargs.items()}\n return pd.DataFrame(fixed, index = _index)\n \n return pd.DataFrame(kwargs, index = _index)\n\nBACKEND_CONFIG = {\n \"postgresql\": {\n \"dialect\": \"postgresql\",\n \"dbname\": [\"SB_TEST_PGDATABASE\", \"postgres\"],\n \"port\": [\"SB_TEST_PGPORT\", \"5432\"],\n \"user\": [\"SB_TEST_PGUSER\", \"postgres\"],\n \"password\": [\"SB_TEST_PGPASSWORD\", \"\"],\n \"host\": [\"SB_TEST_PGHOST\", \"localhost\"],\n },\n \"sqlite\": {\n \"dialect\": \"sqlite\",\n \"dbname\": \":memory:\",\n \"port\": \"0\",\n \"user\": \"\",\n \"password\": \"\",\n \"host\": \"\"\n }\n }\n\nclass Backend:\n def __init__(self, name):\n self.name = name\n\n def dispose(self):\n pass\n\n def load_df(self, df = None, **kwargs):\n if df is None and kwargs:\n df = pd.DataFrame(kwargs)\n elif df is not None and kwargs:\n raise ValueError(\"Cannot pass kwargs, and a DataFrame\")\n\n return df\n\n def load_cached_df(self, df):\n return df\n\n def __repr__(self):\n return \"{0}({1})\".format(self.__class__.__name__, repr(self.name))\n\nclass PandasBackend(Backend):\n pass\n\nclass SqlBackend(Backend):\n table_name_indx = 0\n sa_conn_fmt = \"{dialect}://{user}:{password}@{host}:{port}/{dbname}\"\n\n def __init__(self, name):\n cnfg = BACKEND_CONFIG[name]\n params = {k: os.environ.get(*v) if 
isinstance(v, (list)) else v for k,v in cnfg.items()}\n\n self.name = name\n self.engine = create_engine(self.sa_conn_fmt.format(**params))\n self.cache = {}\n\n def dispose(self):\n self.engine.dispose()\n\n @classmethod\n def unique_table_name(cls):\n cls.table_name_indx += 1\n return \"siuba_{0:03d}\".format(cls.table_name_indx)\n\n def load_df(self, df = None, **kwargs):\n df = super().load_df(df, **kwargs)\n return copy_to_sql(df, self.unique_table_name(), self.engine)\n\n def load_cached_df(self, df):\n import hashlib\n from pandas import util\n hash_arr = util.hash_pandas_object(df, index=True).values\n hashed = hashlib.sha256(hash_arr).hexdigest()\n\n if hashed in self.cache:\n return self.cache[hashed]\n \n res = self.cache[hashed] = self.load_df(df)\n\n return res\n\n\ndef robust_multiple_sort(df, by):\n \"\"\"Sort a DataFrame on multiple columns, slower but more reliable than df.sort_values\n\n Note: pandas errors when you sort by multiple columns, and one has unhashable objects.\n however, it can \"sort\" a single column with unhashable objects.\n\n e.g. df.sort_values(by = ['a', 'b']) may cause an error\n\n This implementation chains sort_values on single columns. In this case,\n pandas sorts a list based on its first entry ¯\\_(ツ)_/¯.\n \"\"\"\n\n from functools import reduce\n\n out = reduce(lambda data, col: data.sort_values(col), by, df)\n\n return out.reset_index(drop = True)\n\ndef assert_frame_sort_equal(a, b, **kwargs):\n \"\"\"Tests that DataFrames are equal, even if rows are in different order\"\"\"\n df_a = ungroup(a)\n df_b = ungroup(b)\n sorted_a = robust_multiple_sort(df_a, list(df_a.columns)).reset_index(drop = True)\n sorted_b = robust_multiple_sort(df_b, list(df_b.columns)).reset_index(drop = True)\n\n assert_frame_equal(sorted_a, sorted_b, **kwargs)\n\ndef assert_equal_query(tbl, lazy_query, target, **kwargs):\n out = collect(lazy_query(tbl))\n\n if isinstance(tbl, pd.DataFrame):\n df_a = ungroup(out).reset_index(drop = True)\n df_b = ungroup(target).reset_index(drop = True)\n assert_frame_equal(df_a, df_b, **kwargs)\n else:\n assert_frame_sort_equal(out, target, **kwargs)\n\n\nPREFIX_TO_TYPE = {\n # for datetime, need to convert to pandas datetime column\n #\"dt\": types.DateTime,\n \"int\": types.Integer,\n \"float\": types.Float,\n \"str\": types.String\n }\n\ndef auto_types(df):\n dtype = {}\n for k in df.columns:\n pref, *_ = k.split('_')\n if pref in PREFIX_TO_TYPE:\n dtype[k] = PREFIX_TO_TYPE[pref]\n return dtype\n\n\ndef copy_to_sql(df, name, engine):\n if isinstance(engine, str):\n engine = create_engine(engine)\n\n df.to_sql(name, engine, dtype = auto_types(df), index = False, if_exists = \"replace\")\n return LazyTbl(engine, name)\n\n\nfrom functools import wraps\nimport pytest\n \ndef backend_notimpl(*names):\n def outer(f):\n @wraps(f)\n def wrapper(backend, *args, **kwargs):\n if backend.name in names:\n with pytest.raises(NotImplementedError):\n f(backend, *args, **kwargs)\n pytest.xfail(\"Not implemented!\")\n else:\n return f(backend, *args, **kwargs)\n return wrapper\n return outer\n\ndef backend_sql(msg):\n # allow decorating without an extra call\n if callable(msg):\n return backend_sql(None)(msg)\n\n def outer(f):\n @wraps(f)\n def wrapper(backend, *args, **kwargs):\n if not isinstance(backend, SqlBackend):\n pytest.skip(msg)\n else:\n return f(backend, *args, **kwargs)\n return wrapper\n return outer\n\ndef backend_pandas(msg):\n # allow decorating without an extra call\n if callable(msg):\n return backend_pandas(None)(msg)\n\n def 
outer(f):\n @wraps(f)\n def wrapper(backend, *args, **kwargs):\n if not isinstance(backend, PandasBackend):\n pytest.skip(msg)\n else:\n return f(backend, *args, **kwargs)\n return wrapper\n return outer\n", "sub_path": "siuba/tests/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 6171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.ndim", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 70, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.util.hash_pandas_object", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.util", "line_number": 91, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 92, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 116, "usage_type": "call"}, {"api_name": "siuba.dply.verbs.ungroup", "line_number": 122, "usage_type": "call"}, {"api_name": "siuba.dply.verbs.ungroup", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 127, "usage_type": "call"}, {"api_name": "siuba.sql.collect", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 132, "usage_type": "attribute"}, {"api_name": "siuba.dply.verbs.ungroup", "line_number": 133, "usage_type": "call"}, {"api_name": "siuba.dply.verbs.ungroup", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 135, "usage_type": "call"}, {"api_name": "sqlalchemy.types.Integer", "line_number": 143, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 143, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Float", "line_number": 144, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 144, "usage_type": "name"}, {"api_name": "sqlalchemy.types.String", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 145, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 159, "usage_type": "call"}, {"api_name": "siuba.sql.LazyTbl", "line_number": 162, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 173, "usage_type": "call"}, {"api_name": "pytest.xfail", "line_number": 175, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 170, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 190, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 187, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 205, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 202, "usage_type": "call"}]} +{"seq_id": "147607760", "text": "__author__ = 'robertsanders'\n\nfrom pyspark import SparkConf, SparkContext\nimport unittest\n\nclass WordCountPythonSparkAppTest(unittest.TestCase):\n\n master = \"local[2]\"\n\n conf = SparkConf().setAppName(__name__).setMaster(master)\n\n sc = None\n\n @staticmethod\n def setUp():\n sc = SparkContext(conf = conf)\n\n @staticmethod\n def tearDown():\n if sc != None:\n sc.stop()\n sc = None\n\n def 
test_upper(self):\n self.assertNotEquals(None, sc)\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "spark_workshop_codebase/wordcount/src/test/python/WordCountPythonSparkAppTest.py", "file_name": "WordCountPythonSparkAppTest.py", "file_ext": "py", "file_size_in_byte": 543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pyspark.SparkConf", "line_number": 10, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 16, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "149796138", "text": "__author__ = 'Dan Mapes'\n__doc__ = 'Description of Tool here'\n\nimport Autodesk.Revit.DB as DB\nfrom pyrevit import script, forms, revit\nfrom collections import Counter, defaultdict\n\ndoc = __revit__.ActiveUIDocument.Document\nuidoc = __revit__.ActiveUIDocument\n\nwith forms.WarningBar(title='Pick elements to count'):\n count_elements = revit.pick_elements()\n\nelement_categories = defaultdict(list)\nelement_family = defaultdict(list)\ncounter = 0\n\nfor el in count_elements:\n if el.ViewSpecific == True:\n continue\n fam_name = el.LookupParameter('Family and Type').AsValueString()\n el_category = el.Category.Name\n element_categories[el_category].append(el)\n element_family[fam_name].append(el)\nfor family in element_family:\n print('{0}: ({1})'.format(family, str(len(element_family[family]))))\n\n# counts = Counter(element_categories).most_common()\n# print counts\n\n# print('Category: {0}\\tFamily Name:\\t{1}{2}'.format(categories_selected, fam_names_selected, len(categories_selected)))\n", "sub_path": "pyMapes.extension/_deprecated tools/Element Count_v1.0.pushbutton/script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyrevit.forms.WarningBar", "line_number": 11, "usage_type": "call"}, {"api_name": "pyrevit.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "pyrevit.revit.pick_elements", "line_number": 12, "usage_type": "call"}, {"api_name": "pyrevit.revit", "line_number": 12, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "54655680", "text": "import requests\r\nimport json\r\nfrom pprint import pprint\r\nimport pandas as pd\r\nfrom elasticsearch import Elasticsearch\r\nfrom elasticsearch import helpers\r\n# url = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key=a95b62f74f9c35f0a79ffd76ae0f927a&targetDt=20120101'\r\nurl = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieList.json'\r\n#키값, 40개씩 나열, 2019년 개봉\r\n\r\nqueryParams = '?' 
+ 'key=' + 'a95b62f74f9c35f0a79ffd76ae0f927a'+ '&itemPerPage=' + '300' \\\r\n + '&openStartDt=' + '2017' + '&openEndDt=' + '2018'\r\nurl = url + queryParams\r\n\r\nres = requests.get(url)\r\ntext = res.text\r\n\r\n\r\nmovie_list = json.loads(text)\r\npprint(movie_list)\r\nmovie_data = []\r\ni=0\r\n# movie_info = OrderedDict()\r\n# movie = [] #csv형식으로 저장\r\nfor d in movie_list['movieListResult']['movieList']:\r\n # print(movie_data['openDt'], movie_data['movieNm'], movie_data['movieNmEn'],\r\n # movie_data['typeNm'], movie_data['nationAlt'], movie_data['repGenreNm'], movie_data['directors'])\r\n #이름, 영어이름, 제작 연도, 개봉 연도, 유형, 제작 국가,\r\n movie_data.append({\r\n \"_id\" : i,\r\n \"openDt\" : d['openDt'],\r\n \"movieNm\" : d['movieNm'],\r\n \"movieEn\" : d['movieNmEn'],\r\n \"typeNm\" : d['typeNm'],\r\n \"nationAlt\" : d['nationAlt'],\r\n \"repGenreNm\" : d['repGenreNm'],\r\n \"directors\": d['directors']\r\n })\r\n if movie_data[i]['directors'] != list():\r\n movie_data[i]['directors'] = d['directors'][0]['peopleNm']\r\n i += 1\r\n else:\r\n movie_data[i]['directors'] = [None]\r\n i += 1\r\n continue\r\n\r\nprint(len(movie_data))\r\nindex_name = 'movie_info'\r\nes = Elasticsearch(['localhost'],port=9200)\r\nhelpers.bulk(es, movie_data, index=index_name)\r\n\r\n# csv형식으로 저장\r\ndata = pd.DataFrame(movie_data)\r\ndata.to_csv(\"movie1.csv\", mode='w', encoding='ms949', index=False)", "sub_path": "movie_API.py", "file_name": "movie_API.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 20, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 49, "usage_type": "call"}, {"api_name": "elasticsearch.helpers.bulk", "line_number": 50, "usage_type": "call"}, {"api_name": "elasticsearch.helpers", "line_number": 50, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "315285344", "text": "from ...typecheck import *\nfrom ...import core\nfrom ...import ui\n\nfrom ..dap import AdapterConfiguration\n\nimport sublime\nimport json\n\nclass Adapters:\n\tall: ClassVar[List[AdapterConfiguration]]\n\n\t@staticmethod\n\tdef initialize():\n\t\tAdapters.all = [klass() for klass in AdapterConfiguration.__subclasses__()]\n\n\t@staticmethod\n\tdef get(type: str) -> AdapterConfiguration:\n\t\tfor adapter in Adapters.all:\n\t\t\tif type == adapter.type:\n\t\t\t\treturn adapter\n\n\t\traise core.Error(f'Unable to find debug adapter with the type name \"{type}\"')\n\n\t@staticmethod\n\tdef install_menu(log: core.Logger = core.stdio):\n\t\titems = []\n\t\tfor adapter in Adapters.all:\n\t\t\tname = adapter.type\n\t\t\tinstalled_version = adapter.installed_version\n\t\t\tif installed_version:\n\t\t\t\tname += '\\t'\n\t\t\t\tname += str(installed_version)\n\n\t\t\titems.append(\n\t\t\t\tui.InputListItemChecked(\n\t\t\t\t\tlambda adapter=adapter: core.run(adapter.install(log)), #type: ignore\n\t\t\t\t\tname,\n\t\t\t\t\tname,\n\t\t\t\t\tinstalled_version != None\n\t\t\t\t)\n\t\t\t)\n\t\treturn ui.InputList(items, \"Install Debug Adapters\")\n\n\t@staticmethod\n\tasync def _insert_snippet(window: sublime.Window, snippet: dict):\n\t\tcontent = json.dumps(snippet, indent=\"\\t\")\n\t\tcontent = content.replace('\\\\\\\\', '\\\\') # remove json encoded \\ 
...\n\t\tproject = window.project_file_name()\n\t\tif project:\n\t\t\tview = await core.sublime_open_file_async(window, project)\n\t\t\tregion = view.find(r'''\"\\s*debugger_configurations\\s*\"\\s*:\\s*\\[''', 0)\n\t\t\tview.sel().clear()\n\t\t\tview.sel().add(sublime.Region(region.b, region.b))\n\t\t\tview.run_command('insert', {\n\t\t\t\t'characters': '\\n'\n\t\t\t})\n\t\t\tview.run_command('insert_snippet', {\n\t\t\t\t'contents': content + ','\n\t\t\t})\n\t\telse:\n\t\t\tsublime.set_clipboard(content)\n\t\t\tcore.display('Unable to insert configuration into sublime-project file: Copied to clipboard instead')\n\n\t@staticmethod\n\tdef add_configuration():\n\t\tdef insert_custom(type: str, request: str):\n\t\t\tcore.run(Adapters._insert_snippet(sublime.active_window(), {\n\t\t\t\t\t'name': f'Debug {type}',\n\t\t\t\t\t'type': type,\n\t\t\t\t\t'request': request,\n\t\t\t\t}))\n\n\t\tvalues = []\n\n\t\tfor adapter in Adapters.all:\n\t\t\tif not adapter.installed_version:\n\t\t\t\tcontinue\n\n\t\t\tsnippet_input_items = []\n\n\t\t\tfor snippet in adapter.configuration_snippets or []:\n\t\t\t\tdef insert(snippet=snippet):\n\t\t\t\t\tinsert = snippet.get('body', '{ error: no body field}')\n\t\t\t\t\tcore.run(Adapters._insert_snippet(sublime.active_window(), insert))\n\n\t\t\t\tsnippet_input_items.append(ui.InputListItem(insert, snippet.get('label', 'label')))\n\n\t\t\tif not snippet_input_items:\n\t\t\t\tsnippet_input_items.append(ui.InputListItem(lambda adapter=adapter: insert_custom(adapter.type, \"launch\"), 'Launch'))\n\t\t\t\tsnippet_input_items.append(ui.InputListItem(lambda adapter=adapter: insert_custom(adapter.type, \"attach\"), 'Attach'))\n\n\t\t\tvalues.append(ui.InputListItem(ui.InputList(snippet_input_items, \"choose a snippet to insert\"), adapter.type))\n\n\t\treturn ui.InputList(values, placeholder=\"choose a configuration type\")\n\n\t@staticmethod\n\tdef recalculate_schema():\n\t\tfrom .schema import save_schema\n\t\tsave_schema(Adapters.all)\n", "sub_path": "modules/debugger/adapter/adapters.py", "file_name": "adapters.py", "file_ext": "py", "file_size_in_byte": 3016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "dap.AdapterConfiguration", "line_number": 11, "usage_type": "name"}, {"api_name": "dap.AdapterConfiguration.__subclasses__", "line_number": 15, "usage_type": "call"}, {"api_name": "dap.AdapterConfiguration", "line_number": 15, "usage_type": "name"}, {"api_name": "dap.AdapterConfiguration", "line_number": 18, "usage_type": "name"}, {"api_name": "sublime.Window", "line_number": 46, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "sublime.Region", "line_number": 54, "usage_type": "call"}, {"api_name": "sublime.set_clipboard", "line_number": 62, "usage_type": "call"}, {"api_name": "sublime.active_window", "line_number": 68, "usage_type": "call"}, {"api_name": "sublime.active_window", "line_number": 85, "usage_type": "call"}, {"api_name": "schema.save_schema", "line_number": 100, "usage_type": "call"}, {"api_name": "{'save_schema': 'schema.save_schema'}.all", "line_number": 100, "usage_type": "attribute"}]} +{"seq_id": "470849329", "text": "import tensorflow as tf\nimport numpy as np\nimport sys\nfrom optparse import OptionParser\nimport os\nimport utils as utl\n\ndef RNN(x, weights, biases, timesteps, num_hidden):\n\n \n # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)\n x = tf.unstack(x, timesteps, 
1)\n\n # Define a lstm cell with tensorflow\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)\n # Get lstm cell output\n outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n\n # Linear activation, using rnn inner loop last output\n return tf.matmul(outputs[-1], weights['out']) + biases['out']\n\n\ndef main():\n if (len(sys.argv) <= 1):\n print(\"infer.py -h or --help to get guideline of input options\")\n exit()\n use = \"Usage: %prog [options] filename\"\n parser = OptionParser(usage = use)\n parser.add_option(\"-d\", \"--input-dir\", dest=\"input_dir\", action=\"store\", type=\"string\", help=\"input data dir\")\n parser.add_option(\"-t\", \"--timesteps\", dest=\"timesteps\", action=\"store\", type=\"int\", help=\"timesteps\")\n parser.add_option(\"-n\", \"--num-input\", dest=\"num_input\", action=\"store\", type=\"int\", help=\"number of input (input vector's width)\")\n parser.add_option(\"-c\", \"--ckpt-dir\", dest=\"ckpt_dir\", action=\"store\", type=\"string\", help=\"directory of checkpoint\")\n\n (options, args) = parser.parse_args()\n input_dir = options.input_dir\n timesteps = options.timesteps\n num_input = options.num_input\n #ckpt_dir = options.ckpt_dir\n len_status = 3\n\n X = np.fromfile(input_dir + '/X.dat', dtype=float)\n cardinality = int(X.shape[0]/(timesteps * num_input + len_status+1))\n X = X.reshape([cardinality, timesteps*num_input + len_status+1])\n Y = np.fromfile(input_dir + '/Y.dat', dtype=float)\n train_x, val_x, test_x, train_y, val_y, test_y = utl.train_val_test_split(X, Y, split_frac=0.80)\n \n # Training Parameters\n learning_rate = 0.001\n epochs =800 \n batch_size = 40\n #display_step = 200\n \n # Network Parameters\n #num_input = 2 \n #timesteps = 480 \n num_hidden = 4096 \n num_classes = 1\n \n print(\"### Network Parameters ###\")\n print(\"Learning Rate: {}\".format(learning_rate))\n print(\"Batch Size: {}\".format(batch_size))\n print(\"Size of Hidden Layer: {}\".format(num_hidden))\n print(\"Timestep: {}\".format(timesteps)) \n print(\"------------------\")\n X_ = tf.placeholder(\"float\", [None, timesteps, num_input])\n X_status = tf.placeholder(\"float\", [None, len_status])\n Y_ = tf.placeholder(\"float\", [None, num_classes])\n lr = tf.placeholder(\"float\")\n \n weights = {\n 'out':tf.Variable(tf.random_normal([num_hidden,num_classes])),\n 'status':tf.Variable(tf.random_normal([len_status, num_classes]))\n }\n biases = {\n 'out':tf.Variable(tf.random_normal([num_classes]))\n }\n seq_embed = RNN(X_, weights, biases, timesteps, num_hidden)\n prediction = seq_embed + tf.matmul(X_status, weights['status']) \n #prediction = seq_embed + tf.matmul(X_status, weights['status'][:2]) \n #prediction = seq_embed\n \n loss_op = tf.losses.mean_squared_error(Y_, prediction)\n #optimizer = tf.train.AdadeltaOptimizer(lr).minimize(loss_op)\n #optimizer = tf.train.AdamOptimizer(lr).minimize(loss_op)\n optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss_op)\n \n correct_pred = tf.equal(tf.cast( (prediction/1.8) - tf.round(prediction/1.8), tf.float32), tf.cast( (prediction/1.8)-tf.round(Y_/1.8), tf.float32))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n # Restore the ckpt\n SAVER_DIR = options.ckpt_dir \n saver = tf.train.Saver()\n checkpoint_path = os.path.join(SAVER_DIR, SAVER_DIR)\n ckpt = tf.train.get_checkpoint_state(SAVER_DIR)\n \n \n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n #new_saver = tf.train.import_meta_graph('ckpt.meta')\n 
saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"weight of status varible\")\n print( weights['status'].eval())\n x_seq = X[:, :timesteps*num_input]\n x_status = X[:, timesteps*num_input:timesteps*num_input+len_status]\n x_status = utl.norm_status(x_status, [0,1])\n x_seq = x_seq.reshape((-1, timesteps, num_input))\n pred = np.array(prediction.eval(feed_dict = {X_:x_seq, X_status:x_status, Y_:Y[:, None]}))\n \n pred_diagnosis = [1 if x[0]>=1.8 else 0 for x in list(pred)]\n y_diagnosis = [1 if x>=1.8 else 0 for x in list(Y)]\n evaluation = np.equal(pred_diagnosis, y_diagnosis)\n print(\"accuracy\")\n print(np.mean(evaluation))\n f = open(SAVER_DIR+'/result.txt', 'w')\n for i in range(0, len(Y)):\n f.write(str(pred[i][0]) + ', ' + str(Y[i])+'\\n')\n f2 = open(SAVER_DIR+'/result_diagnosis.txt', 'w')\n for i in range(0, len(Y)):\n f2.write(str(pred_diagnosis[i]) + ', ' + str(y_diagnosis[i])+'\\n')\n f2.close()\n f.close()\n# sess.close()\n\n\nif __name__==\"__main__\":\n main()\n", "sub_path": "infer_categorical.py", "file_name": "infer_categorical.py", "file_ext": "py", "file_size_in_byte": 5068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.unstack", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.contrib.rnn.BasicLSTMCell", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.static_rnn", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "optparse.OptionParser", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.train_val_test_split", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.losses.mean_squared_error", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.cast", 
"line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.round", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.norm_status", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "43326078", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pykzee/Tree.py\n# Compiled at: 2019-10-25 08:08:53\n# Size of source mod 2**32: 4181 bytes\nimport collections\nfrom pyimmutable import ImmutableDict\nfrom pykzee.common import Undefined, setDataForPath\n\nclass Tree:\n\n class RegisteredCommand:\n __slots__ = ('function', 'doc', 'unregister', 'disabled')\n\n def __init__(self, function, doc, unregister):\n self.function = function\n self.doc = doc\n self.unregister = unregister\n self.disabled = False\n\n TreeAccess = collections.namedtuple('TreeAccess', ('set', 'submitState', 'registerCommand',\n 'createSubtree', 'clear',\n 'deactivate'))\n\n def __init__(self, parent, path, *, immediate_updates=True):\n self._Tree__parentSet = parent.set\n self._Tree__parentRegisterCommand = parent.registerCommand\n self._Tree__path = path\n self._Tree__state = ImmutableDict()\n self._Tree__reportedState = Undefined\n self._Tree__registeredCommands = {}\n self._Tree__immediate_updates = immediate_updates\n self._Tree__deactivated = False\n self._Tree__hidden = False\n\n @property\n def path(self):\n return self._Tree__path\n\n def getAccessProxy(self):\n return self.TreeAccess(self.set, self.submitState, self.registerCommand, self.createSubtree, self.clear, self.deactivate)\n\n def set(self, path, value):\n self._Tree__state = setDataForPath(self._Tree__state, path, value)\n if self._Tree__immediate_updates:\n self.submitState()\n\n def registerCommand(self, path, name, function, doc=Undefined):\n if doc is Undefined:\n doc = function.__doc__\n else:\n existing_rc = self._Tree__registeredCommands.get((path, name))\n if existing_rc is not None:\n existing_rc.disabled = True\n existing_rc.unregister()\n if self._Tree__hidden:\n unregister = no_op\n else:\n unregister = self._Tree__parentRegisterCommand(self._Tree__path + path, name, function, doc)\n rc = self.RegisteredCommand(function, doc, unregister)\n self._Tree__registeredCommands[(path, name)] = rc\n\n def 
unregister_command():\n if not rc.disabled:\n rc.disabled = True\n del self._Tree__registeredCommands[(path, name)]\n rc.unregister()\n\n return unregister_command\n\n def createSubtree(self, path, *, immediate_updates=True):\n return Tree(self, path, immediate_updates=immediate_updates)\n\n def clear(self):\n for rc in self._Tree__registeredCommands.values():\n rc.disabled = True\n rc.unregister()\n\n self._Tree__registeredCommands = {}\n self._Tree__state = ImmutableDict()\n self._Tree__reportedState = Undefined\n self._Tree__parentSet(self._Tree__path, Undefined)\n\n def deactivate(self):\n if not self._Tree__deactivated:\n self.clear()\n self._Tree__parentSet = self._Tree__parentRegisterCommand = raise_deactivated\n self._Tree__deactivated = True\n\n def hide(self):\n if self._Tree__hidden:\n return\n for rc in self._Tree__registeredCommands.values():\n rc.unregister()\n rc.unregister = no_op\n\n self._Tree__parentSet(self._Tree__path, Undefined)\n self._Tree__hidden = True\n\n def show(self, new_path=None):\n if not self._Tree__hidden:\n if not new_path is None:\n if new_path == self._Tree__path:\n return\n self.hide()\n if new_path is not None:\n self._Tree__path = new_path\n self._Tree__parentSet(self._Tree__path, self._Tree__state)\n self._Tree__hidden = False\n self._Tree__reportedState = self._Tree__state\n for (path, name), rc in self._Tree__registeredCommands.items():\n rc.unregister = self._Tree__parentRegisterCommand(self._Tree__path + path, name, rc.function, rc.doc)\n\n def submitState(self):\n if self._Tree__reportedState is not self._Tree__state:\n if not self._Tree__hidden:\n self._Tree__parentSet(self._Tree__path, self._Tree__state)\n self._Tree__reportedState = self._Tree__state\n\n\ndef raise_deactivated(*args, **kwargs):\n raise Exception('Subtree has been deactivated')\n\n\ndef no_op():\n pass", "sub_path": "pycfiles/pykzee-0.1.1-py3.7/Tree.cpython-37.py", "file_name": "Tree.cpython-37.py", "file_ext": "py", "file_size_in_byte": 4700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.namedtuple", "line_number": 23, "usage_type": "call"}, {"api_name": "pyimmutable.ImmutableDict", "line_number": 31, "usage_type": "call"}, {"api_name": "pykzee.common.Undefined", "line_number": 32, "usage_type": "name"}, {"api_name": "pykzee.common.setDataForPath", "line_number": 46, "usage_type": "call"}, {"api_name": "pykzee.common.Undefined", "line_number": 50, "usage_type": "name"}, {"api_name": "pykzee.common.Undefined", "line_number": 51, "usage_type": "name"}, {"api_name": "pyimmutable.ImmutableDict", "line_number": 82, "usage_type": "call"}, {"api_name": "pykzee.common.Undefined", "line_number": 83, "usage_type": "name"}, {"api_name": "pykzee.common.Undefined", "line_number": 84, "usage_type": "argument"}, {"api_name": "pykzee.common.Undefined", "line_number": 99, "usage_type": "argument"}]} +{"seq_id": "302695826", "text": "from flask import Flask, render_template, url_for, flash, redirect, request, get_template_attribute, send_from_directory\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jsonpify import jsonify\nimport requests\nimport numpy as np\nimport nibabel as nb\nimport matplotlib.pyplot as plt\nimport os\nimport datetime as dt\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore, initialize_app, db, storage\nimport tensorflow as tf\n\n## turn into a class based api with methods get post put etc.\n\napp = 
Flask(__name__)\n\n# temporary data bas for storing images\napp.config[\"UPLOAD_FOLDER\"] = \"static/\"\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napi = Api(app)\n\nconfig = credentials.Certificate(\"jsonconst/key.json\")\nfirebase_admin.initialize_app(config, {\n 'storageBucket': 'niiwebsite-794a2.appspot.com',\n 'databaseURL' : 'https://niiwebsite-794a2.firebaseio.com/'\n})\n\nref = db.reference('filenames')\nniifiles = storage.bucket()\nSTANDARD= dt.datetime(2030, month = 12, day = 30)\nbackendurl = \"http://127.0.0.1:5000/getmask/\"\nMODEL_FILE = open(\"jsonconst/model.json\")\nLOADED_MODEL = MODEL_FILE.read()\nMODEL_FILE.close()\nMODEL = None\n\n@app.route('/')\ndef index():\n ref.delete()\n files = niifiles.list_blobs()\n niifiles.delete_blobs(files)\n return render_template('base.html')\n\n@app.route('/getimg', methods = ['POST', 'GET'])\ndef getimg():\n if request.method == 'POST':\n file = request.files['file']\n if file == None:\n return redirect(url_for('index'))\n elif file == '':\n return redirect(url_for('index'))\n else:\n file.save(app.config[\"UPLOAD_FOLDER\"] + file.filename)\n print(file.filename)\n name = add_file(file.filename, file.filename)\n # keys = requests.put(backendurl + file.filename)\n # keys = keys.json()\n # url = None\n # for key in keys.keys():\n # if \"slice\" in key:\n # url = keys[key]['url']\n # dimg = get_template_attribute(\"base.html\", \"geturl1\")\n # dimg(url)\n #\n # keys = requests.get(backendurl + file.filename)\n # keys = keys.json()\n # url2 = None\n # file2 = None\n # for key in keys.keys():\n # if \"sliceog\" in key:\n # url2 = keys[key]['url']\n # elif \"mask\" in key:\n # file2 = key\n # dimg = get_template_attribute(\"base.html\", \"geturl2\")\n # dimg(url2)\n # return \"done\"\n #redirect(url_for('processnii', filename = file.filename))\n return redirect(url_for('load_img1', filename = file.filename))\n\n@app.route('/load_img1/')\ndef load_img1(filename):\n keys = requests.put(backendurl + filename)\n keys = keys.json()\n url = None\n for key in keys.keys():\n if \"slice\" in key:\n url = keys[key]['url']\n dimg = get_template_attribute(\"base.html\", \"geturl1\")\n\n keys = requests.get(backendurl + filename)\n keys = keys.json()\n url2 = None\n file2 = None\n for key in keys.keys():\n if \"sliceog\" in key:\n url2 = keys[key]['url']\n elif \"mask\" in key:\n file2 = key\n dimg = get_template_attribute(\"base.html\", \"geturl2\")\n dimg(url2)\n\n fin = niifiles.get_blob(keys[file2]['name'])\n fin.download_to_filename(app.config[\"UPLOAD_FOLDER\"] + keys[file2]['name'])\n #return send_from_directory(directory = app.config[\"UPLOAD_FOLDER\"], filename=keys[file2]['name'])\n\n os.remove(app.config[\"UPLOAD_FOLDER\"] + keys[file2]['name'])\n keyfil = requests.delete(backendurl + filename)\n return render_template(\"base.html\", urlslice = url, urlslice1 = url2)#, send_from_directory(directory = app.config[\"UPLOAD_FOLDER\"], filename=keys[file2]['name'])\n\n\n\ndef add_file(namedb, name, nchildname = None, delete = 't'):\n nii = niifiles.blob(namedb)\n with open(app.config[\"UPLOAD_FOLDER\"] + name, 'rb') as file:\n nii.upload_from_file(file)\n if delete == 't':\n os.remove(app.config[\"UPLOAD_FOLDER\"] + name)\n if nchildname == None:\n file_ref = ref.child(namedb.replace('/', '~').replace('.', '>'))\n file_ref.set({'name': namedb, 'url' : nii.generate_signed_url(STANDARD)})\n else:\n file_ref = ref.child(nchildname).child(namedb.replace('/', '~').replace('.', '>'))\n file_ref.set({'name': namedb, 'url': 
nii.generate_signed_url(STANDARD)})\n print(ref.get())\n return ref.get()\n\nif __name__ == '__main__':\n app.jinja_env.auto_reload = True\n app.run(debug = True, port=5001)\n", "sub_path": "backend/src/flaskfrontendtest2.py", "file_name": "flaskfrontendtest2.py", "file_ext": "py", "file_size_in_byte": 4656, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 22, "usage_type": "call"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 24, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 24, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 25, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 30, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 30, "usage_type": "name"}, {"api_name": "firebase_admin.storage.bucket", "line_number": 31, "usage_type": "call"}, {"api_name": "firebase_admin.storage", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 80, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.get_template_attribute", "line_number": 90, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.get_template_attribute", "line_number": 101, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 108, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "292968992", "text": "from flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\n\napp.secret_key = \"kavic84nses09bc03ncdald\"\n\ndef incrementor():\n try:\n session['counter'] += 1\n except KeyError:\n session['counter'] = 1\n\n@app.route('/')\ndef index():\n incrementor()\n return render_template('index.html')\n\n@app.route('/ninja')\ndef ninja():\n incrementor()\n return redirect('/')\n\n@app.route('/hacker')\ndef hacker():\n session['counter'] = 0\n return redirect('/')\n\napp.run(debug = True)\n", "sub_path": "Counter/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 2, "usage_type": "call"}, {"api_name": "flask.session", 
"line_number": 8, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "276894761", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/will/projects/moya/moya/cache/memorycache.py\n# Compiled at: 2017-01-15 13:25:40\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom ..cache import Cache\nfrom collections import OrderedDict, namedtuple\nfrom time import time as get_time\nimport logging\nlog = logging.getLogger(b'moya.runtime')\nCacheEntry = namedtuple(b'CacheEntry', b'value,expire_time')\n\nclass MemoryCache(Cache):\n \"\"\"Caches in a memcached server\"\"\"\n cache_backend_name = b'memory'\n\n def __init__(self, name, namespace, compress=True, compress_min=1024, size=1048576):\n super(MemoryCache, self).__init__(name, namespace, compress=compress, thread_safe=False)\n self.max_size = size\n self.entries = OrderedDict()\n self.size = 0\n\n @classmethod\n def initialize(cls, name, settings):\n return cls(name, settings.get(b'namespace', b''), compress=settings.get_bool(b'compress', True), compress_min=settings.get_int(b'compress_min', 16384), size=settings.get_int(b'size', 1024) * 1024)\n\n def evict_entry(self, key):\n \"\"\"Evict a single key.\"\"\"\n if key in self.entries:\n entry = self.entries.pop(key)\n self.size -= len(entry.value)\n\n def reclaim(self, num_bytes):\n \"\"\"Reclaim at least `num_bytes`\"\"\"\n log.debug(b'%r size=%s bytes', self, self.size)\n log.debug(b'%r reclaiming %s bytes', self, num_bytes)\n reclaimed = 0\n while self.entries and reclaimed < num_bytes:\n key, entry = self.entries.popitem(last=False)\n log.debug(b'%r evicting %r', self, key)\n deleted_bytes_count = len(entry.value)\n self.size -= deleted_bytes_count\n reclaimed += deleted_bytes_count\n\n return reclaimed >= num_bytes\n\n def _get(self, key, default):\n try:\n entry = self.entries.pop(key)\n except KeyError:\n return default\n\n value_bytes = entry.value\n value_size = len(value_bytes)\n self.size -= value_size\n if entry.expire_time and get_time() > entry.expire_time:\n return default\n self.entries[key] = entry\n self.size += value_size\n return self.decode_value(value_bytes)\n\n def _set(self, key, value, time):\n value_bytes = self.encode_value(value)\n value_size = len(value_bytes)\n if value_size > self.max_size:\n return\n else:\n self.evict_entry(key)\n if self.size + value_size > self.max_size:\n if not self.reclaim(self.size + value_size - self.max_size):\n return\n expire_time = None if time == 0 else get_time() + time / 1000.0\n self.entries[key] = CacheEntry(value_bytes, expire_time)\n self.size += value_size\n return\n\n def _delete(self, key):\n if key in self.entries:\n self.size -= len(self.entries.pop(key).value)\n\n\nif __name__ == b'__main__':\n cache = MemoryCache(b'test', b'')\n cache.set(b'foo', b'bar')\n print(cache.get(b'foo'))\n print(cache.encode_value(b'value'))", "sub_path": "pycfiles/moya-0.6.20-py2.py3-none-any/memorycache.py", "file_name": "memorycache.py", "file_ext": "py", "file_size_in_byte": 3259, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 14, "usage_type": "call"}, {"api_name": "cache.Cache", "line_number": 16, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "cache.set", "line_number": 87, "usage_type": "call"}, {"api_name": "cache.get", "line_number": 88, "usage_type": "call"}, {"api_name": "cache.encode_value", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "637759339", "text": "from django.urls import path\nfrom shop import views\n\nurlpatterns = [\n path('', views.HomeView.as_view(), name=\"index\"),\n path('category/', views.view_category, name=\"category\"),\n path('cart/', views.CartView.as_view(), name='cart'),\n # user actions\n path('edit/', views.edit_profile, name=\"edit\"),\n path('login/', views.log_in, name=\"login\"),\n path('logout/', views.log_out, name='log_out'),\n path('register/', views.register, name=\"register\"),\n\n]\n", "sub_path": "shop/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "shop.views.HomeView.as_view", "line_number": 5, "usage_type": "call"}, {"api_name": "shop.views.HomeView", "line_number": 5, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "shop.views.view_category", "line_number": 6, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "shop.views.CartView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "shop.views.CartView", "line_number": 7, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "shop.views.edit_profile", "line_number": 9, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "shop.views.log_in", "line_number": 10, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "shop.views.log_out", "line_number": 11, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "shop.views.register", "line_number": 12, "usage_type": "attribute"}, {"api_name": "shop.views", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "206334985", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic import ListView, DetailView\n\nfrom .models import Post, Category\n#from .forms import CommentForm\n# Create your views 
here.\n\n'''\n알아봐야할 것들\n\n1.카테고리를 생성하기 위해 Author 혹은 User를 설정해야 한다.\n회원가입을 한 유저중 카테고리를 만들 권한을 선택적으로 부여하게 만들 것이다.\nauth를 장고에서 기본으로 제공하는 것들로 만들었는데\n쿼리셋 등을 이용해서 어떻게 username을 가져와야할지 모르겠다.\n\n2.view에서 CRUD를 자유자재로 사용하는 방법을 공부해야겠다.\nadmin페이지를 사용하지 않고 CRUD를 할 수 있게 만들것이다.\n'''\n\ndef getCategoryIdNum(cg_num):\n posts = Post.objects.all()\n category_num = posts.filter(category_id = cg_num)\n return len(category_num)\n\n'''\ndef getPage(post_list):\n paginator = Paginator(post_list, 5)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n'''\n\ndef viewMain(request):\n post_list = Post.objects.all()\n category_list = Category.objects.all()\n\n paginator = Paginator(post_list, 5)\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n return render(request, 'blog/main.html', {\n 'category_list' : category_list,\n 'page_list' : posts,\n 'category_num' : [getCategoryIdNum(x) for x in range(1, len(category_list)+1)]\n })\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {\n 'post' : post\n })\n\n'''\nclass PostDetail(DetailView):\n model = Post\n'''\n\n''' 삽질의 흔적들..... 다시보고 똑같은 실수를 안하기 위해 남겨둔다.\nclass CategoryList(ListView):\n model = Category\n context_object_name = 'categories'\n\nclass PostList(ListView):\n model = Post\n template_name = 'blog/post_list.html'\n context_object_name = 'posts'\n paginate_by = 8\n'''\n\n'''\nclass ViewMain(TemplateView):\n template_name = 'blog/main.html'\n\n def post_list():\n post_list = Post.objects.all()\n return redirect(post_list)\n\n def category_list():\n category_list = Category.objects.all()\n return redirect(category_list)\n'''\n\n'''\ndef comment_new(request, pk):\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = Post.objects.get(pk=pk)\n comment.save()\n return redirect('post_detail', pk)\n else:\n form = CommentForm()\n return render(request, 'blog/post_from_comment.html', {'form':form})\n\ndef comment_edit(request, post_pk, pk):\n comment = Comment.objects.get(pk=post_pk)\n if request.method == 'POST':\n form = CommentForm(request.POST, instance=comment)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = Post.objects.get(pk=post_pk)\n comment.save()\n return redirect('post_detail', post_pk)\n else:\n form = CommentForm(instance=comment)\n return render(request, 'blog/post_from_comment.html', {'form':form})\n'''\n", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "models.Post.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Post.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": 
"models.Category.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 41, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 43, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 47, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 49, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "398910954", "text": "def mex(arr):\n i = 0\n while True:\n if arr.count(i) != 0:\n i += 1\n else:\n return i\n\n\ndef xor_gate(x, y):\n from bitstring import BitArray\n x = \"{0:b}\".format(x)\n y = \"{0:b}\".format(y)\n while len(x) != len(y):\n if len(x) < len(y):\n x = \"0\" + str(x)\n else:\n y = \"0\" + str(y)\n i = 0\n p = \"\"\n while i < min(len(x), len(y)):\n if x[i] == y[i]:\n p = str(p) + \"0\"\n else:\n p = str(p) + \"1\"\n i += 1\n b = BitArray(bin=p)\n return b.uint\n\n\ndef f(n):\n if n == 0 or n == 1:\n return 0\n else:\n arr = []\n k = 0\n while k < n:\n arr.append(xor_gate(f(k), f(n-k-1)))\n k += 1\n return mex(arr)\n\n", "sub_path": "grim.py", "file_name": "grim.py", "file_ext": "py", "file_size_in_byte": 780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "bitstring.BitArray", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "268228533", "text": "import requests\nimport time\nimport Adafruit_DHT\n\ntemp=0\nhum=0\nsensortype=11\nsensorpin=21\nwhile True:\n humidity, temperature = Adafruit_DHT.read_retry(sensortype, sensorpin)\n\n temp1=str(temperature)\n hum1=str(humidity)\n\n \n url='https://api.thingspeak.com/update?api_key=ITZTYZIAKC6GZ1T4&field1='+temp1+'&field2='+hum1\n requests.get(url)\n print('Temperature:'+temp1)\n print('Humidity:'+hum1)\n print('******************************')\n time.sleep(10)\n \n \n\n", "sub_path": "thingspeak_dht.py", "file_name": "thingspeak_dht.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "Adafruit_DHT.read_retry", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "621097564", "text": "import json\nimport requests\nimport csv \nimport time\n\nclass StockFinancialsCrawler:\n\n def __init__ (self):\n self.session = requests.Session()\n self.final_stock_financials_tickers_list = []\n self.final_stock_financials_dict_list = []\n\n def _go_tickers(self,url):\n tickersRequest = requests.get(url)\n assert tickersRequest.status_code == 200\n time.sleep(12)\n return tickersRequest\n \n def _get_tickers(self,tickersRequest):\n dictTickers = json.loads(tickersRequest.text)\n return dictTickers\n\n def _go_ticker_details(self,i):\n tickerDetailsRequest = requests.get(\"https://api.polygon.io/v1/meta/symbols/\"+str(i)+\"/company?&apiKey=fXeNsEuF5_RYIgNgFae7LsdpGSz_jAxn\").text\n assert tickerDetailsRequest.status_code == 200\n time.sleep(12)\n return tickerDetailsRequest\n\n def _get_ticker_details(self,tickerDetailsRequest):\n dictTickersDetails = 
json.loads(tickerDetailsRequest)\n return dictTickersDetails \n\n def _get_next_cursor(self,ticker_next_request):\n print(ticker_next_request.status_code) # status code la requete = 401 => OK\n if ticker_next_request.status_code == 200: \n next_tickers_result = json.loads(ticker_next_request.text)\n tickers = next_tickers_result[\"results\"]\n return tickers\n else:\n return None \n\n def _go_stock_financials_details(self,companyTicker):\n stockFinancialsRequest = requests.get(\"https://api.polygon.io/v2/reference/financials/\"+str(companyTicker)+\"?limit=3&type=T&apiKey=fXeNsEuF5_RYIgNgFae7LsdpGSz_jAxn\")\n assert stockFinancialsRequest.status_code == 200\n time.sleep(12)\n return stockFinancialsRequest\n \n def _get_stock_financials_details(self,stockFinancialsRequest):\n dictstockFinancials = json.loads(stockFinancialsRequest.text)\n return dictstockFinancials\n\n def _go_aggregates_details(self,companyTicker):\n url = \"https://api.polygon.io/v2/aggs/ticker/\"+str(companyTicker)+\"/range/1/week/2020-08-19/2021-08-19?adjusted=true&sort=asc&limit=120&apiKey=fXeNsEuF5_RYIgNgFae7LsdpGSz_jAxn\"\n aggreagatesRequests = requests.get(url)\n assert aggreagatesRequests.status_code == 200\n time.sleep(12)\n print(url)\n return aggreagatesRequests\n \n def _get_aggregates_details(self,aggreagatesRequests):\n dictAggregates = json.loads(aggreagatesRequests.text) # sous la forme {.... results:{ {v:..}, {v:..}, {v:..} }}\n return dictAggregates \n\n def _import_features_into_csv(self,final_stock_financials_dict_list):\n with open(\"C:\\\\Users\\\\CYTech Student\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python39\\\\Scripts\\\\financialsInformation.csv\",'w', encoding='utf-8') as financialsInfo:\n featuresNames = [\"Id\",\"tickerId\", \"ticker\",\"period\",\"calendar_date\",\"report_period\",\"updated\",\"date_key\",\"accumulated_other_comprehensive_income\",\"assets\",\"assets_current\",\"assets_non_current\",\"book_value_per_share\",\"capital_expenditure\",\"cash_and_equivalents\",\"cash_and_equivalents_usd\",\"cost_of_revenue\",\"consolidated_income\"\n ,\"current_ratio\",\"debt_to_equity_ratio\",\"debt\",\"debt_current\",\"debt_non_current\",\"debt_usd\",\"deferred_revenue\",\"depreciation_amortization_and_accretion\",\"deposits\",\"dividend_yield\",\"dividends_per_basic_common_share\",\"earning_before_interest_taxes\",\"earnings_before_interest_taxes_depreciation_amortization\",\"ebitda_margin\",\"earnings_before_interest_taxes_depreciation_amortization_usd\",\n \"earning_before_interest_taxes_usd\",\"earnings_before_tax\",\"earnings_per_basic_share\",\"earnings_per_diluted_share\",\"earnings_per_basic_share_usd\",\"shareholders_equity\",\"shareholders_equity_usd\",\"enterprise_value\",\"enterprise_value_over_ebit\",\"enterprise_value_over_ebita\",\"free_cash_flow\",\"free_cash_flow_per_share\",\n \"foreign_currency_usd_exchange_rate\",\"gross_profit\",\"gross_margin\",\"goodwill_and_intangible_assets\",\"interest_expense\",\"invested_capital\",\"inventory\",\"investments\",\"investments_current\",\"investments_non_current\",\"total_liabilities\",\"current_liabilities\",\"liabilities_non_current\",\"market_capitalization\",\"net_cash_flow\",\n 
\"net_cash_flow_business_acquisitions_disposals\",\"issuance_equity_shares\",\"issuance_debt_securities\",\"payment_dividends_other_cash_distributions\",\"net_cash_flow_from_financing\",\"net_cash_flow_from_investing\",\"net_cash_flow_investment_acquisitions_disposals\",\"net_cash_flow_from_operations\",\"effect_of_exchange_rate_changes_on_cash\",\"net_income\",\n \"net_income_common_stock\",\"net_income_common_stock_usd\",\"net_loss_income_from_discontinued_operations\",\"net_income_to_non_controlling_interests\",\"profit_margin\",\"operating_expenses\",\"operating_income\",\"trade_and_non_trade_payables\",\"payout_ratio\",\"price_to_book_value\",\"price_earnings\",\"price_to_earnings_ratio\",\"property_plant_equipment_net\",\n \"preferred_dividends_income_statement_impact\",\"share_price_adjusted_close\",\"price_sales\",\"price_to_sales_ratio\",\"trade_and_non_trade_receivables\",\"accumulated_retained_earnings_deficit\",\"revenues\",\"revenues_usd\",\"research_and_development_expense\",\"return_on_sales\",\"share_based_compensation\",\"selling_general_and_administrative_expense\",\"share_factor\",\n \"shares\",\"weighted_average_shares\",\"weighted_average_shares_diluted\",\"sales_per_share\",\"tangible_asset_value\",\"tax_assets\",\"income_tax_expense\",\"tax_liabilities\",\"tangible_assets_book_value_per_share\",\"working_capital\"]\n\n writer = csv.DictWriter(financialsInfo, fieldnames=featuresNames) \n writer.writeheader()\n countId = 0\n countTickerId = 1\n print(\"in the import method before the loop:\"+str(len(final_stock_financials_dict_list)))\n print(final_stock_financials_dict_list) # sous la forme { {..,results:[ {}{}{}] }, {..,results:[ {}{}{}] }, {..,results:[ {}{}{}] } }\n for ticker in final_stock_financials_dict_list:\n print(\"##########################\")\n print(ticker)\n for i in range(0,len(ticker)):\n writer.writerow({ \n \"Id\": countId\n ,\"tickerId\": countTickerId\n ,\"ticker\":ticker[\"results\"][i][\"ticker\"]\n ,\"period\":ticker[\"results\"][i][\"period\"]\n ,\"calendar_date\":ticker[\"results\"][i][\"calendarDate\"]\n ,\"report_period\":ticker[\"results\"][i][\"reportPeriod\"]\n ,\"updated\":ticker[\"results\"][i][\"updated\"]\n ,\"date_key\":ticker[\"results\"][i][\"dateKey\"]\n ,\"accumulated_other_comprehensive_income\":ticker[\"results\"][i][\"accumulatedOtherComprehensiveIncome\"]\n ,\"assets\":ticker[\"results\"][i][\"assets\"]\n ,\"assets_current\":ticker[\"results\"][i][\"assetsCurrent\"]\n ,\"assets_non_current\":ticker[\"results\"][i][\"assetsNonCurrent\"]\n ,\"book_value_per_share\":ticker[\"results\"][i][\"bookValuePerShare\"]\n ,\"capital_expenditure\":ticker[\"results\"][i][\"capitalExpenditure\"]\n ,\"cash_and_equivalents\":ticker[\"results\"][i][\"cashAndEquivalents\"]\n ,\"cash_and_equivalents_usd\":ticker[\"results\"][i][\"cashAndEquivalentsUSD\"]\n ,\"cost_of_revenue\":ticker[\"results\"][i][\"costOfRevenue\"]\n ,\"consolidated_income\":ticker[\"results\"][i][\"consolidatedIncome\"]\n ,\"current_ratio\":ticker[\"results\"][i][\"currentRatio\"]\n ,\"debt_to_equity_ratio\":ticker[\"results\"][i][\"debtToEquityRatio\"]\n ,\"debt\":ticker[\"results\"][i][\"debt\"]\n ,\"debt_current\":ticker[\"results\"][i][\"debtCurrent\"]\n ,\"debt_non_current\":ticker[\"results\"][i][\"debtNonCurrent\"]\n ,\"debt_usd\":ticker[\"results\"][i][\"debtUSD\"]\n ,\"deferred_revenue\":ticker[\"results\"][i][\"deferredRevenue\"]\n ,\"depreciation_amortization_and_accretion\":ticker[\"results\"][i][\"depreciationAmortizationAndAccretion\"]\n 
,\"deposits\":ticker[\"results\"][i][\"deposits\"]\n ,\"dividend_yield\":ticker[\"results\"][i][\"dividendYield\"]\n ,\"dividends_per_basic_common_share\":ticker[\"results\"][i][\"dividendsPerBasicCommonShare\"]\n ,\"earning_before_interest_taxes\":ticker[\"results\"][i][\"earningBeforeInterestTaxes\"]\n ,\"earnings_before_interest_taxes_depreciation_amortization\":ticker[\"results\"][i][\"earningsBeforeInterestTaxesDepreciationAmortization\"]\n ,\"ebitda_margin\":ticker[\"results\"][i][\"EBITDAMargin\"]\n ,\"earnings_before_interest_taxes_depreciation_amortization_usd\":ticker[\"results\"][i][\"earningsBeforeInterestTaxesDepreciationAmortizationUSD\"]\n ,\"earning_before_interest_taxes_usd\":ticker[\"results\"][i][ \"earningBeforeInterestTaxesUSD\"]\n ,\"earnings_before_tax\":ticker[\"results\"][i][\"earningsBeforeTax\"]\n ,\"earnings_per_basic_share\":ticker[\"results\"][i][\"earningsPerBasicShare\"]\n ,\"earnings_per_diluted_share\":ticker[\"results\"][i][\"earningsPerDilutedShare\"]\n ,\"earnings_per_basic_share_usd\":ticker[\"results\"][i][\"earningsPerBasicShareUSD\"]\n ,\"shareholders_equity\":ticker[\"results\"][i][\"shareholdersEquity\"]\n ,\"shareholders_equity_usd\":ticker[\"results\"][i][ \"shareholdersEquityUSD\"]\n ,\"enterprise_value\":ticker[\"results\"][i][\"enterpriseValue\"]\n ,\"enterprise_value_over_ebit\":ticker[\"results\"][i][\"enterpriseValueOverEBIT\"]\n ,\"enterprise_value_over_ebita\":ticker[\"results\"][i][\"enterpriseValueOverEBITDA\"]\n ,\"free_cash_flow\":ticker[\"results\"][i][\"freeCashFlow\"]\n ,\"free_cash_flow_per_share\":ticker[\"results\"][i][\"freeCashFlowPerShare\"]\n ,\"foreign_currency_usd_exchange_rate\":ticker[\"results\"][i][\"foreignCurrencyUSDExchangeRate\"]\n ,\"gross_profit\":ticker[\"results\"][i][\"grossProfit\"]\n ,\"gross_margin\":ticker[\"results\"][i][\"grossMargin\"]\n ,\"goodwill_and_intangible_assets\":ticker[\"results\"][i][\"goodwillAndIntangibleAssets\"]\n ,\"interest_expense\":ticker[\"results\"][i][\"interestExpense\"]\n ,\"invested_capital\":ticker[\"results\"][i][\"investedCapital\"]\n ,\"inventory\":ticker[\"results\"][i][\"inventory\"]\n ,\"investments\":ticker[\"results\"][i][\"investments\"]\n ,\"investments_current\":ticker[\"results\"][i][\"investmentsCurrent\"]\n ,\"investments_non_current\":ticker[\"results\"][i][\"investmentsNonCurrent\"]\n ,\"total_liabilities\":ticker[\"results\"][i][\"totalLiabilities\"]\n ,\"current_liabilities\":ticker[\"results\"][i][\"currentLiabilities\"]\n ,\"liabilities_non_current\":ticker[\"results\"][i][\"liabilitiesNonCurrent\"]\n ,\"market_capitalization\":ticker[\"results\"][i][\"marketCapitalization\"]\n ,\"net_cash_flow\":ticker[\"results\"][i][\"netCashFlow\"]\n ,\"net_cash_flow_business_acquisitions_disposals\":ticker[\"results\"][i][\"netCashFlowBusinessAcquisitionsDisposals\"]\n ,\"issuance_equity_shares\":ticker[\"results\"][i][\"issuanceEquityShares\"]\n ,\"issuance_debt_securities\":ticker[\"results\"][i][\"issuanceDebtSecurities\"]\n ,\"payment_dividends_other_cash_distributions\":ticker[\"results\"][i][\"paymentDividendsOtherCashDistributions\"]\n ,\"net_cash_flow_from_financing\":ticker[\"results\"][i][\"netCashFlowFromFinancing\"]\n ,\"net_cash_flow_from_investing\":ticker[\"results\"][i][\"netCashFlowFromInvesting\"]\n ,\"net_cash_flow_investment_acquisitions_disposals\":ticker[\"results\"][i][\"netCashFlowInvestmentAcquisitionsDisposals\"]\n ,\"net_cash_flow_from_operations\":ticker[\"results\"][i][\"netCashFlowFromOperations\"]\n 
,\"effect_of_exchange_rate_changes_on_cash\":ticker[\"results\"][i][\"effectOfExchangeRateChangesOnCash\"]\n ,\"net_income\":ticker[\"results\"][i][\"netIncome\"]\n ,\"net_income_common_stock\":ticker[\"results\"][i][ \"netIncomeCommonStock\"]\n ,\"net_income_common_stock_usd\":ticker[\"results\"][i][\"netIncomeCommonStockUSD\"]\n ,\"net_loss_income_from_discontinued_operations\":ticker[\"results\"][i][\"netLossIncomeFromDiscontinuedOperations\"]\n ,\"net_income_to_non_controlling_interests\":ticker[\"results\"][i][\"netIncomeToNonControllingInterests\"]\n ,\"profit_margin\":ticker[\"results\"][i][\"profitMargin\"]\n ,\"operating_expenses\":ticker[\"results\"][i][\"operatingExpenses\"]\n ,\"operating_income\":ticker[\"results\"][i][\"operatingIncome\"]\n ,\"trade_and_non_trade_payables\":ticker[\"results\"][i][\"tradeAndNonTradePayables\"]\n ,\"payout_ratio\":ticker[\"results\"][i][\"payoutRatio\"]\n ,\"price_to_book_value\":ticker[\"results\"][i][\"priceToBookValue\"]\n ,\"price_earnings\":ticker[\"results\"][i][\"priceEarnings\"]\n ,\"price_to_earnings_ratio\":ticker[\"results\"][i][\"priceToEarningsRatio\"]\n ,\"property_plant_equipment_net\":ticker[\"results\"][i][\"propertyPlantEquipmentNet\"]\n ,\"preferred_dividends_income_statement_impact\":ticker[\"results\"][i][\"preferredDividendsIncomeStatementImpact\"]\n ,\"share_price_adjusted_close\":ticker[\"results\"][i][\"sharePriceAdjustedClose\"]\n ,\"price_sales\":ticker[\"results\"][i][\"priceSales\"]\n ,\"price_to_sales_ratio\":ticker[\"results\"][i][\"priceToSalesRatio\"]\n ,\"trade_and_non_trade_receivables\":ticker[\"results\"][i][\"tradeAndNonTradeReceivables\"]\n ,\"accumulated_retained_earnings_deficit\":ticker[\"results\"][i][\"accumulatedRetainedEarningsDeficit\"]\n ,\"revenues\":ticker[\"results\"][i][\"revenues\"]\n ,\"revenues_usd\":ticker[\"results\"][i][ \"revenuesUSD\"]\n ,\"research_and_development_expense\":ticker[\"results\"][i][\"researchAndDevelopmentExpense\"]\n ,\"return_on_sales\":ticker[\"results\"][i][\"returnOnSales\"]\n ,\"share_based_compensation\":ticker[\"results\"][i][\"shareBasedCompensation\"]\n ,\"selling_general_and_administrative_expense\":ticker[\"results\"][i][\"sellingGeneralAndAdministrativeExpense\"]\n ,\"share_factor\":ticker[\"results\"][i][\"shareFactor\"]\n ,\"shares\":ticker[\"results\"][i][\"shares\"]\n ,\"weighted_average_shares\":ticker[\"results\"][i][\"weightedAverageShares\"]\n #,\"weighted_average_shares_diluted\":ticker[\"results\"][i][\"weightedAverageSharesDiluted\"] #weightedAverageSharesDiluted\n ,\"sales_per_share\":ticker[\"results\"][i][\"salesPerShare\"]\n ,\"tangible_asset_value\":ticker[\"results\"][i][\"tangibleAssetValue\"]\n ,\"tax_assets\":ticker[\"results\"][i][\"taxAssets\"]\n ,\"income_tax_expense\":ticker[\"results\"][i][\"incomeTaxExpense\"]\n ,\"tax_liabilities\":ticker[\"results\"][i][\"taxLiabilities\"]\n ,\"tangible_assets_book_value_per_share\":ticker[\"results\"][i][\"tangibleAssetsBookValuePerShare\"]\n ,\"working_capital\":ticker[\"results\"][i][\"workingCapital\"]\n })\n countId += 1\n countTickerId += 1\n \n def execute(self,stock_financials_crawler):\n \n counter = 0\n firstUrl = \"https://api.polygon.io/v3/reference/tickers?active=true&sort=ticker&order=asc&limit=1000&apiKey=fXeNsEuF5_RYIgNgFae7LsdpGSz_jAxn\"\n ticker_request = stock_financials_crawler._go_tickers(firstUrl)\n tickers_result = stock_financials_crawler._get_tickers(ticker_request)\n tickers = tickers_result[\"results\"]\n\n for ticker in tickers: # remplissage du ticker dict pour la 
premiere page avec les ticker names à reutiliser pour stocks fianancials \n print(counter)\n ticker_dict = {}\n print(ticker.get('ticker'))\n ticker_dict.update(tickerName=ticker.get('ticker'))\n self.final_stock_financials_tickers_list.append(ticker_dict)\n counter+=1\n if counter == 2:\n break\n\n next_url = tickers_result[\"next_url\"]\n #print(next_url) #OK verification de l'url pour voir si ma requete est fonctionnelle ou pas\n\n if requests.get(next_url).status_code == 200:\n\n ticker_next_request = stock_financials_crawler._go_tickers(next_url)\n tickers_next = stock_financials_crawler._get_next_cursor(ticker_next_request)\n\n if tickers_next != None:\n _has_next_page = True\n print(\"Ticker is different of 'None'\")\n for ticker in tickers_next: # première boucle for pour l'initialisation du premier next_url\n print(counter)\n ticker_dict = {}\n print(tickers_next)\n ticker_dict.update(tickerName=tickers_next.get('ticker'))\n self.final_stock_financials_tickers_list.append(ticker_dict)\n counter+=1\n else:\n _has_next_page = False\n print(\"Ticker is equal to 'None'\")\n\n counter = 3 # pour connaitre le nombre de page prise\n\n while _has_next_page == True:\n print(\"I am in \"+str(counter)+\"next_url\")\n tickers_next = stock_financials_crawler._get_next_cursor(ticker_next_request)\n for ticker in tickers_next: \n print(counter)\n print(ticker)\n ticker_dict.update(tickerName=tickers_next.get('ticker'))\n self.final_stock_financials_tickers_list.append(ticker_dict)\n counter+=1\n next_url = tickers_result[\"next_url\"]\n ticker_next_request = requests.get(next_url)\n else:\n pass \n\n for company in self.final_stock_financials_tickers_list:\n stockFinancialsRequest = stock_financials_crawler._go_stock_financials_details(company[\"tickerName\"])\n \"\"\" DEBUGGER\n print(stockFinancialsRequest.text)\n print(\"#######################\")\n print(requests.get(\"https://api.polygon.io/v2/reference/financials/A?limit=3&type=T&apiKey=fXeNsEuF5_RYIgNgFae7LsdpGSz_jAxn\").text) \"\"\"\n dictstockFinancials = stock_financials_crawler._get_stock_financials_details(stockFinancialsRequest)\n print(dictstockFinancials)\n self.final_stock_financials_dict_list.append(dictstockFinancials)\n print(\"in the loop:\"+str(len(self.final_stock_financials_dict_list))) # TAILLE DE LIST = 1000\n\n print(\"after the loop\"+str(len(self.final_stock_financials_dict_list))) # TAILLE DE LIST = 1000\n final_stock_financials_dict_list = self.final_stock_financials_dict_list\n stock_financials_crawler._import_features_into_csv(final_stock_financials_dict_list)\n\ndef main():\n stock_financials_crawler = StockFinancialsCrawler()\n stock_financials_crawler.execute(stock_financials_crawler)\n\nif __name__ == \"__main__\":\n main()\n\n\n", "sub_path": "dataScienceFinancialProjectExtractFinancialFeatures.py", "file_name": "dataScienceFinancialProjectExtractFinancialFeatures.py", "file_ext": "py", "file_size_in_byte": 20500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.Session", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, 
{"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 75, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 217, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "260908628", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport Env\r\nEnv.Custom.set()\r\nimport tweepy\r\nfrom private.UserProfile import BotProfile\r\n\r\nimport DataStore\r\n\r\n# TwitterAPI制御\r\nclass Twitter():\r\n\r\n\t# TwitterアクセスのAPI\r\n\tapi = None\r\n\r\n\t# 通信制御の停止フラグ\r\n\tisEnable = True\r\n\r\n\tdef __init__(self):\r\n\t\tself.api = None\r\n\t\tself.Enable = True\r\n\r\n\t\tself.auth()\r\n\r\n\t# Update不可設定\r\n\tdef setDisable(self):\r\n\t\tself.isEnable = False\r\n\r\n\t# Update可能設定\r\n\tdef setEnable(self):\r\n\t\tself.isEnable = True\r\n\r\n\t# 認証\r\n\tdef auth(self):\r\n\t\t# create OAuth handler\r\n\t\tauth = tweepy.OAuthHandler(BotProfile.consumer_key, BotProfile.consumer_secret)\r\n\r\n\t\t# set access token to OAuth handler\r\n\t\tauth.set_access_token(BotProfile.access_key, BotProfile.access_secret)\r\n\r\n\t\t# create API\r\n\t\tself.api = tweepy.API(auth_handler=auth)\r\n\r\n\t# Update\r\n\tdef update(self, status, replyId=0):\r\n\t\tds = DataStore.DataStore()\r\n\t\tif self.isEnable and ds.getTweetEnable():\r\n\t\t\tif replyId:\r\n\t\t\t\tself.api.update_status(status, replyId)\r\n\t\t\telse:\r\n\t\t\t\tself.api.update_status(status)\r\n\r\n\t# メンションの取得\r\n\tdef getMentions(self):\r\n\t\tuserTL = self.api.mentions_timeline()\r\n\t\tslist=[]\r\n\t\tfor s in userTL:\r\n\t\t\tslist.append((s.id, s.user.screen_name, s.text))\r\n\t\treturn slist\r\n\r\n\t# ユーザー別タイムラインの取得\r\n\tdef getUserTL(self, user):\r\n\t\tuserTL = self.api.user_timeline(id=user)\r\n\t\tslist=[]\r\n\t\tfor s in userTL:\r\n\t\t\tslist.append((s.id, s.text))\r\n\t\treturn slist\r\n\r\n\t# リフォロー機能\r\n\tdef refollow(self):\r\n\t\t# 自分の情報を取得\r\n\t\tmeid = self.api.me().id\r\n\r\n\t\t# 友達(フォロー)リストの取得\r\n\t\tfriendLst = self.api.friends_ids(id=meid)\r\n\r\n\t\t# フォロワーリストの取得\r\n\t\t# 友達(フォロー)リストの取得\r\n\t\tfollowerLst = self.api.followers_ids(id=meid)\r\n\r\n\t\t#フォロワーのリストから、フォロー済みの人を外す\r\n\t\tfor id in friendLst:\r\n\t\t\tif id in followerLst:\r\n\t\t\t\tfollowerLst.remove(id)\r\n\r\n\t\t# フォローしていない人をフォローする\r\n\t\tfor id in followerLst:\r\n\t\t\tif not self.api.get_user(id=id).protected:\r\n\t\t\t\tself.api.create_friendship(id=id)\r\n\r\ndef main():\r\n\tpass\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n", "sub_path": "toru-bot/source/riskrisk-torubot/Twitter.py", "file_name": "Twitter.py", "file_ext": "py", "file_size_in_byte": 2203, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "Env.Custom.set", "line_number": 4, "usage_type": "call"}, {"api_name": "Env.Custom", "line_number": 4, "usage_type": "attribute"}, {"api_name": "tweepy.OAuthHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "private.UserProfile.BotProfile.consumer_key", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"private.UserProfile.BotProfile", "line_number": 36, "usage_type": "name"}, {"api_name": "private.UserProfile.BotProfile.consumer_secret", "line_number": 36, "usage_type": "attribute"}, {"api_name": "private.UserProfile.BotProfile.access_key", "line_number": 39, "usage_type": "attribute"}, {"api_name": "private.UserProfile.BotProfile", "line_number": 39, "usage_type": "name"}, {"api_name": "private.UserProfile.BotProfile.access_secret", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 42, "usage_type": "call"}, {"api_name": "DataStore.DataStore", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "296194296", "text": "import numpy as np\nimport pyccl as ccl\nimport matplotlib.pyplot as plt\n\nz_ini=0.\nz_end=2.\nnz_hires=1024\n\nn_zsam=10\n\n# For simplicity let's use a Gaussian N(z)\ndef nz_model(z):\n sigma=0.2\n mean=1.0\n return np.exp(-0.5*((z-mean)/sigma)**2)/np.sqrt(2*np.pi*sigma**2)\n\n# High-resolution array of redshifts\nz_hires=np.linspace(z_ini,z_end+0.5,int(nz_hires*(1+0.5/(z_end-z_ini))))\n\n# Array containing the edges of each redshift slice\nz_lores=np.linspace(z_ini,z_end,n_zsam+1)\n# Low edges\nz_lo=z_lores[:-1]\n# High edges\nz_hi=z_lores[1:]\n# Mid-point\nz_mid=0.5*(z_lo+z_hi)\n\ndef window_step(z,z_l,z_h):\n return (np.heaviside(z-z_l,0.)-np.heaviside(z-z_h,0.0))/(z_h-z_l)\n\n\n# Now lets generate redshift distributions for each slice\nnz_slices=np.array([window_step(z_hires,z_l,z_h)\n for z_l,z_h in zip(z_lo,z_hi)])\n\n# In each slice we just take the value of\n# the Gaussian above at the centre of the bin.\nnz_lores=nz_model(z_mid)\nnz_lores/=np.sum(nz_lores)\n\n# This is our model for the redshift distribution\nnz_steps=np.einsum('j,ji', nz_lores, nz_slices)\n\n\n# Alright, power spectra\nN_ell = 100\nell_ini = 100.\nell_end = 2000.\nells = np.linspace(ell_ini, ell_end, N_ell)\ncsm=ccl.Cosmology(Omega_b=0.05,\n Omega_c=0.25,\n sigma8=0.8,\n h=0.67,\n n_s=0.96,\n Omega_k=0)\n\ndef get_cl(cosmo,nz1,nz2):\n # This computes the power spectrum between two redshift distributions\n t1=ccl.WeakLensingTracer(cosmo,nz1)\n t2=ccl.WeakLensingTracer(cosmo,nz2)\n return ccl.angular_cl(cosmo,t1,t2,ells)\n\n\n# Power spectrum for the model redshift distribution\ncls_steps=get_cl(csm,\n (z_hires,nz_steps),\n (z_hires,nz_steps))\n\n# Power spectra for each pair of slices\ncls_slices=np.zeros([n_zsam,n_zsam,N_ell])\nfor i1 in range(n_zsam):\n for i2 in range(i1,n_zsam):\n print(i1,i2)\n cls_slices[i1,i2,:]=get_cl(csm,\n (z_hires,nz_slices[i1]),\n (z_hires,nz_slices[i2]))\n if i1!=i2:\n cls_slices[i2,i1,:]=cls_slices[i1,i2,:]\n\n\n# Now sandwich with N(z) amplitudes\ncls_sandwich=np.einsum('i,ijk,j',nz_lores,cls_slices,nz_lores)\n\n\n# Let's now make some fake data\n# Noise power spectrum, assuming 10 gals/arcmin^2\nsigma_gamma = 0.28\nndens = 10.\nnls = sigma_gamma**2 * \\\n np.ones_like(cls_steps) / \\\n (ndens * (180 * 60 / np.pi)**2)\n# Covariance matrix (assuming 10% sky fraction)\nD_ell = (ell_end - ell_ini) / N_ell\nfsky = 0.1\ncovar_cl = np.diag((cls_steps + nls)**2/((ells + 0.5) * fsky * D_ell))\n# Now let's generate some fake power spectrum data\ncls_data = np.random.multivariate_normal(cls_steps, covar_cl)\nnp.savez('cls_data',\n ls=ells,\n cls=cls_data,\n cs_covar=covar_cl)\n\n# Now let's do the same thing for the redshift distribution\n# Let's say that we have 10% errors + some offset so we have some\n# extra constant noise (so the errors aren't 0 where N(z) is 0).\nsigma_nz = 0.1 * nz_lores + 0.02 * np.amax(nz_lores)\ncovar_nz = 
np.diag(sigma_nz**2)\nnz_lores_data = np.random.multivariate_normal(nz_lores, covar_nz)\nnp.savez('nz_data',\n nz=nz_lores_data,\n nz_covar=covar_nz,\n z_edges_lo=z_lo,\n z_edges_hi=z_hi)\n\n# Compare high-resolution N(z) and low-resolution slicing\nnz_smooth=nz_model(z_hires)\nplt.figure()\nfor n in nz_slices:\n plt.plot(z_hires,n,'k-',lw=1)\nplt.errorbar(z_mid,\n nz_lores_data/(z_hi-z_lo),\n yerr = sigma_nz/(z_hi-z_lo),\n fmt='g.')\nplt.plot(z_hires,nz_smooth,'r-')\nplt.plot(z_hires,nz_steps,'b-')\n\n# Power spectra\nplt.figure()\nplt.plot(ells,cls_steps,'b-')\nplt.plot(ells,cls_sandwich,'r--')\nplt.errorbar(ells,cls_data,\n yerr=np.sqrt(np.diag(covar_cl)),\n fmt='g.')\nplt.plot(ells,nls,'k--',lw=1)\nplt.loglog()\n\nplt.show()\n", "sub_path": "nz_propagation_singlebin_ll.py", "file_name": "nz_propagation_singlebin_ll.py", "file_ext": "py", "file_size_in_byte": 3877, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.heaviside", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "pyccl.Cosmology", "line_number": 51, "usage_type": "call"}, {"api_name": "pyccl.WeakLensingTracer", "line_number": 60, "usage_type": "call"}, {"api_name": "pyccl.WeakLensingTracer", "line_number": 61, "usage_type": "call"}, {"api_name": "pyccl.angular_cl", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "74982089", "text": "from __future__ import annotations\n\nimport json\nimport logging\nimport os\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Optional, Type, TypeVar, Union\n\nfrom gretel_client.rest.api.projects_api import ProjectsApi\nfrom gretel_client.rest.api_client import ApiClient\nfrom gretel_client.rest.configuration import Configuration\nfrom gretel_client.rest.exceptions import NotFoundException, UnauthorizedException\n\nGRETEL = \"gretel\"\n\"\"\"Gretel application name\"\"\"\n\nGRETEL_API_KEY = \"GRETEL_API_KEY\"\n\"\"\"Env variable to configure Gretel api key.\"\"\"\n\nGRETEL_ENDPOINT = \"GRETEL_ENDPOINT\"\n\"\"\"Env variable name to configure default Gretel endpoint. Defaults\nto DEFAULT_GRETEL_ENDPOINT.\n\"\"\"\n\nGRETEL_CONFIG_FILE = \"GRETEL_CONFIG_FILE\"\n\"\"\"Env variable name to override default configuration file location\"\"\"\n\nGRETEL_PROJECT = \"GRETEL_PROJECT\"\n\"\"\"Env variable name to select default project\"\"\"\n\n\nDEFAULT_GRETEL_ENDPOINT = \"https://api.gretel.cloud\"\n\"\"\"Default gretel endpoint\"\"\"\n\n\nclass GretelClientConfigurationError(Exception):\n ...\n\n\nT = TypeVar(\"T\")\n\n\nclass RunnerMode(Enum):\n LOCAL = \"local\"\n CLOUD = \"cloud\"\n MANUAL = \"manual\"\n\n\nDEFAULT_RUNNER = RunnerMode.CLOUD\n\n\nclass ClientConfig:\n \"\"\"Holds Gretel client configuration details. 
This can be instantiated from\n a file or environment.\n \"\"\"\n\n endpoint: str\n \"\"\"Gretel API endpoint.\"\"\"\n\n api_key: Optional[str] = None\n \"\"\"Gretel API key.\"\"\"\n\n default_project_name: Optional[str] = None\n \"\"\"Default Gretel project name.\"\"\"\n\n default_runner: str = DEFAULT_RUNNER.value\n \"\"\"Default runner\"\"\"\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n api_key: Optional[str] = None,\n default_project_name: Optional[str] = None,\n default_runner: str = DEFAULT_RUNNER.value,\n ):\n self.endpoint = endpoint or os.getenv(GRETEL_ENDPOINT) or DEFAULT_GRETEL_ENDPOINT\n self.api_key = api_key or os.getenv(GRETEL_API_KEY)\n self.default_runner = default_runner\n self.default_project_name = default_project_name or os.getenv(GRETEL_PROJECT) or default_project_name\n\n @classmethod\n def from_file(cls, file_path: Path) -> ClientConfig:\n config = json.loads(file_path.read_bytes())\n return cls.from_dict(config)\n\n @classmethod\n def from_env(cls) -> ClientConfig:\n return cls()\n\n @classmethod\n def from_dict(cls, source: dict) -> ClientConfig:\n return cls(**{k: v for k, v in source.items() if k in cls.__annotations__.keys()})\n\n def _get_api_client(self) -> ApiClient:\n configuration = Configuration(\n host=self.endpoint, api_key={\"ApiKey\": self.api_key}\n )\n return ApiClient(configuration)\n\n def get_api(self, api_interface: Type[T]) -> T:\n return api_interface(self._get_api_client())\n\n def _check_project(self, project_name: str = None) -> Optional[str]:\n if not project_name:\n return None\n projects_api = self.get_api(ProjectsApi)\n try:\n projects_api.get_project(project_id=project_name)\n except (UnauthorizedException, NotFoundException) as ex:\n raise GretelClientConfigurationError(\n f\"Project {project_name} is invalid\"\n ) from ex\n return project_name\n\n def update_default_project(self, project_id: str):\n \"\"\"Updates the default project.\n\n Args:\n project_name: The name or id of the project to set.\n \"\"\"\n self.default_project_name = project_id\n\n @property\n def as_dict(self) -> dict:\n return {\n prop: getattr(self, prop)\n for prop in self.__annotations__\n if not prop.startswith(\"_\")\n }\n\n def __eq__(self, other: ClientConfig) -> bool:\n return self.as_dict == other.as_dict\n\n @property\n def masked(self) -> dict:\n \"\"\"Returns a masked representation of the config object.\"\"\"\n c = self.as_dict\n c[\"api_key\"] = \"[redacted from output]\"\n return c\n\n @property\n def masked_api_key(self) -> str:\n if not self.api_key:\n return \"None\"\n\n return self.api_key[:8] + \"****\"\n\n\ndef _get_config_path() -> Path:\n \"\"\"Returns the path to the system's Gretel config\"\"\"\n from_env = os.getenv(GRETEL_CONFIG_FILE)\n if from_env:\n return Path(from_env)\n return Path().home() / f\".{GRETEL}\" / \"config.json\"\n\n\ndef _load_config(config_path: Path = None) -> ClientConfig:\n \"\"\"This will load in a Gretel config that can be used for making\n requests to Gretel's API.\n\n By default this function will look for a config on the local machine. If that\n config doesn't exist, it will fallback to building a config using environment\n variables on the system.\n\n Args:\n config_path: Path to a local Gretel config. 
This defaults to\n ``$HOME/.gretel/config.json``.\n \"\"\"\n if not config_path:\n config_path = _get_config_path()\n if not config_path.exists():\n return ClientConfig.from_env()\n try:\n return ClientConfig.from_file(config_path)\n except Exception as ex:\n raise GretelClientConfigurationError(\n f\"Could not load config from {config_path}\"\n ) from ex\n\n\ndef write_config(config: ClientConfig, config_path: Union[str, Path] = None) -> Path:\n \"\"\"Writes a Gretel client config to disk.\n\n Args:\n config: The client config to write\n config_path: Path to write the config to. If not path is provided, the\n default ``$HOME/.gretel/config.json`` path is used.\n \"\"\"\n if not config_path:\n config_path = _get_config_path()\n if isinstance(config_path, str):\n config_path = Path(config_path)\n try:\n if not config_path.exists():\n config_path.parent.mkdir(exist_ok=True, parents=True)\n config_path.touch()\n config_path.write_text(json.dumps(config.as_dict, indent=4) + \"\\n\")\n return config_path\n except Exception as ex:\n raise GretelClientConfigurationError(\n f\"Could write config to {config_path}\"\n ) from ex\n\n\n_session_client_config = _load_config() # noqa\n\n\ndef get_session_config() -> ClientConfig:\n \"\"\"Return the session's client config\"\"\"\n return _session_client_config\n\n\ndef configure_session(config: Union[str, ClientConfig]):\n \"\"\"Updates client config for the session\n\n Args:\n config: The config to update. If the config is a string, this function\n will attempt to parse it as a Gretel URI.\n \"\"\"\n global _session_client_config\n if isinstance(config, ClientConfig):\n _session_client_config = config\n if isinstance(config, str):\n raise NotImplementedError(\"Gretel URIs are not supported yet.\")\n\n\n_custom_logger = None\n\n\ndef get_logger(name: str = None) -> logging.Logger:\n return _custom_logger or logging.getLogger(name)\n\n\ndef configure_custom_logger(logger):\n global _custom_logger\n _custom_logger = logger\n", "sub_path": "src/gretel_client/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 6988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "typing.TypeVar", "line_number": 41, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 74, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 77, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 78, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 80, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 83, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "gretel_client.rest.configuration.Configuration", "line_number": 96, "usage_type": "call"}, {"api_name": "gretel_client.rest.api_client.ApiClient", "line_number": 99, "usage_type": "call"}, {"api_name": "gretel_client.rest.api_client.ApiClient", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 101, "usage_type": "name"}, {"api_name": "gretel_client.rest.api.projects_api.ProjectsApi", "line_number": 107, "usage_type": 
"argument"}, {"api_name": "gretel_client.rest.exceptions.UnauthorizedException", "line_number": 110, "usage_type": "name"}, {"api_name": "gretel_client.rest.exceptions.NotFoundException", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 104, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 152, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 154, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 155, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 182, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 182, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 214, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 232, "usage_type": "call"}, {"api_name": "logging.Logger", "line_number": 231, "usage_type": "attribute"}]} +{"seq_id": "171445355", "text": "#################### Imports #########################\nfrom __future__ import print_function, division\nimport argparse\nimport os\nimport json\nimport torch\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torchvision.models as models\nimport torch.nn as nn\nfrom torch.optim import lr_scheduler\nimport time\nimport numpy as np\nfrom torch.autograd import Variable\nfrom pathlib import Path\nfrom PIL import Image\nimport pandas as pd\n#from skimage import io, transform\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport time\nfrom IPython.display import clear_output\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n##################################################\n\n# This script was the first attempt at gathering \n# the necassary code to appropriately run the \n# training of SegNet. The script was made to\n# save the model as well as be flexible for \n# running in parallel over multiple instances.\n\n# See refactored segnet2 training script\n\n##################################################\n\n\ndef _average_gradients(model):\n # Gradient averaging.\n size = float(dist.get_world_size())\n for param in model.parameters():\n dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)\n param.grad.data /= size\n\n\nclass SegNet(nn.Module):\n \"\"\"SegNet: A Deep Convolutional Encoder-Decoder Architecture for\n Image Segmentation. 
https://arxiv.org/abs/1511.00561\n See https://github.com/alexgkendall/SegNet-Tutorial for original models.\n Args:\n num_classes (int): number of classes to segment\n n_init_features (int): number of input features in the fist convolution\n drop_rate (float): dropout rate of each encoder/decoder module\n filter_config (list of 5 ints): number of output features at each level\n \"\"\"\n def __init__(self, num_classes, n_init_features=1, drop_rate=0.5,\n filter_config=(64, 128, 256, 512, 512)):\n super(SegNet, self).__init__()\n\n self.encoders = nn.ModuleList()\n self.decoders = nn.ModuleList()\n # setup number of conv-bn-relu blocks per module and number of filters\n encoder_n_layers = (2, 2, 3, 3, 3)\n encoder_filter_config = (n_init_features,) + filter_config\n decoder_n_layers = (3, 3, 3, 2, 1)\n decoder_filter_config = filter_config[::-1] + (filter_config[0],)\n\n for i in range(0, 5):\n # encoder architecture\n self.encoders.append(_Encoder(encoder_filter_config[i],\n encoder_filter_config[i + 1],\n encoder_n_layers[i], drop_rate))\n\n # decoder architecture\n self.decoders.append(_Decoder(decoder_filter_config[i],\n decoder_filter_config[i + 1],\n decoder_n_layers[i], drop_rate))\n\n # final classifier (equivalent to a fully connected layer)\n self.classifier = nn.Conv2d(filter_config[0], num_classes, 3, 1, 1)\n\n def forward(self, x):\n indices = []\n unpool_sizes = []\n feat = x\n\n # encoder path, keep track of pooling indices and features size\n for i in range(0, 5):\n (feat, ind), size = self.encoders[i](feat)\n indices.append(ind)\n unpool_sizes.append(size)\n\n # decoder path, upsampling with corresponding indices and size\n for i in range(0, 5):\n feat = self.decoders[i](feat, indices[4 - i], unpool_sizes[4 - i])\n\n return self.classifier(feat)\n\n\nclass _Encoder(nn.Module):\n def __init__(self, n_in_feat, n_out_feat, n_blocks=2, drop_rate=0.5):\n \"\"\"Encoder layer follows VGG rules + keeps pooling indices\n Args:\n n_in_feat (int): number of input features\n n_out_feat (int): number of output features\n n_blocks (int): number of conv-batch-relu block inside the encoder\n drop_rate (float): dropout rate to use\n \"\"\"\n super(_Encoder, self).__init__()\n\n layers = [nn.Conv2d(n_in_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU(inplace=True)]\n\n if n_blocks > 1:\n layers += [nn.Conv2d(n_out_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU(inplace=True)]\n if n_blocks == 3:\n layers += [nn.Dropout(drop_rate)]\n\n self.features = nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.features(x)\n return F.max_pool2d(output, 2, 2, return_indices=True), output.size()\n\n\nclass _Decoder(nn.Module):\n \"\"\"Decoder layer decodes the features by unpooling with respect to\n the pooling indices of the corresponding decoder part.\n Args:\n n_in_feat (int): number of input features\n n_out_feat (int): number of output features\n n_blocks (int): number of conv-batch-relu block inside the decoder\n drop_rate (float): dropout rate to use\n \"\"\"\n def __init__(self, n_in_feat, n_out_feat, n_blocks=2, drop_rate=0.5):\n super(_Decoder, self).__init__()\n\n layers = [nn.Conv2d(n_in_feat, n_in_feat, 3, 1, 1),\n nn.BatchNorm2d(n_in_feat),\n nn.ReLU(inplace=True)]\n\n if n_blocks > 1:\n layers += [nn.Conv2d(n_in_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU(inplace=True)]\n if n_blocks == 3:\n layers += [nn.Dropout(drop_rate)]\n\n self.features = nn.Sequential(*layers)\n\n def forward(self, x, indices, size):\n 
unpooled = F.max_unpool2d(x, indices, 2, 2, 0, size)\n return self.features(unpooled)\n\n\n\nif __name__ =='__main__':\n\n parser = argparse.ArgumentParser()\n\n # hyperparameters sent by the client are passed as command-line arguments to the script.\n parser.add_argument('--epochs', type=int, default=50)\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--learning-rate', type=float, default=0.001)\n parser.add_argument('--use-cuda', type=bool, default=False)\n\n # Data, model, and output directories\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n # parser.add_argument('--test', type=str, default=os.environ['SM_CHANNEL_TEST']) don't need test right now, can test in seperate script\n\n # default to the value in environment variable `SM_MODEL_DIR`. Using args makes the script more portable.\n #parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n\n\n parser.add_argument('--backend', type=str, default=None,\n help='backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)')\n\n # Container environment\n #env = sagemaker_containers.training_env()\n #parser.add_argument('--hosts', type=list, default=env.hosts)\n\n # using it in argparse\n parser.add_argument('hosts', type=str, default=json.loads(os.environ['SM_HOSTS']))\n\n #parser.add_argument('--current-host', type=str, default=env.current_host)\n # using it in argparse\n parser.add_argument('--current_host', type=str, default=os.environ['SM_CURRENT_HOST'])\n #parser.add_argument('--num-gpus', type=int, default=env.num_gpus)\n # using it in argparse\n parser.add_argument('--num_gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n # using it as variable\n num_gpus = int(os.environ['SM_NUM_GPUS'])\n # using it as variable\n hosts = json.loads(os.environ['SM_HOSTS'])\n \n args, _ = parser.parse_known_args()\n \n is_distributed = len(args.hosts) > 1 and args.backend is not None\n #logger.debug(\"Distributed training - {}\".format(is_distributed))\n use_cuda = num_gpus > 0\n #logger.debug(\"Number of gpus available - {}\".format(args.num_gpus))\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n if is_distributed:\n # Initialize the distributed environment.\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n # using it as variable\n current_host = os.environ['SM_CURRENT_HOST']\n ######## breaking the code not sure what it was in the first place\n #host_rank = args.hosts.index(current_host)\n #dist.init_process_group(backend=args.backend, \n #rank=host_rank, \n # world_size=world_size)\n #logger.info(\n # 'Init distributed env: \\'{}\\' backend on {} nodes. '.format(args.backend, \n # dist.get_world_size()) + \\\n # 'Current host rank is {}. Number of gpus: {}'.format(\n # dist.get_rank(), args.num_gpus))\n \n\n ################################# Training ###################################\n # ... 
load from args.train and args.test, train a model, write model to args.model_dir.\n\n ######### Unpack Args ##########\n train_dir = args.train\n #model_dir = args.model_dir just copy and pasted model becuase it wasn't working\n\n batch_size = args.batch_size\n learning_rate = args.learning_rate\n epochs = args.epochs\n\n use_cuda = args.use_cuda\n\n\n\n ############## suspect that this is causing the weird tensor bug\n ########## GPU stuff that I don't know lol #############\n # define variables if GPU is to be used\n #if torch.cuda.is_available():\n # use_gpu = True\n # print(\"Using GPU\")\n #else:\n # use_gpu = False\n #FloatTensor = torch.cuda.FloatTensor if use_gpu else torch.FloatTensor\n #LongTensor = torch.cuda.LongTensor if use_gpu else torch.LongTensor\n #ByteTensor = torch.cuda.ByteTensor if use_gpu else torch.ByteTensor\n #Tensor = FloatTensor\n\n ########## Dataclass for segmentation ###########\n class CloudDataset(Dataset):\n def __init__(self, r_dir, g_dir, b_dir, nir_dir, gt_dir, pytorch=True):\n super().__init__()\n\n # Loop through the files in red folder and combine, into a dictionary, the other bands\n self.files = [self.combine_files(f, g_dir, b_dir, nir_dir, gt_dir) for f in r_dir.iterdir() if\n not f.is_dir()]\n self.pytorch = pytorch\n\n def combine_files(self, r_file: Path, g_dir, b_dir, nir_dir, gt_dir):\n\n files = {'red': r_file,\n 'green': g_dir / r_file.name.replace('red', 'green'),\n 'blue': b_dir / r_file.name.replace('red', 'blue'),\n 'nir': nir_dir / r_file.name.replace('red', 'nir'),\n 'gt': gt_dir / r_file.name.replace('red', 'gt')}\n\n return files\n\n def __len__(self):\n\n return len(self.files)\n\n def open_as_array(self, idx, invert=False, include_nir=False):\n\n raw_rgb = np.stack([np.array(Image.open(self.files[idx]['red'])),\n np.array(Image.open(self.files[idx]['green'])),\n np.array(Image.open(self.files[idx]['blue'])),\n ], axis=2)\n\n if include_nir:\n nir = np.expand_dims(np.array(Image.open(self.files[idx]['nir'])), 2)\n raw_rgb = np.concatenate([raw_rgb, nir], axis=2)\n\n if invert:\n raw_rgb = raw_rgb.transpose((2, 0, 1))\n\n # normalize\n return (raw_rgb / np.iinfo(raw_rgb.dtype).max)\n\n def open_mask(self, idx, add_dims=False):\n\n raw_mask = np.array(Image.open(self.files[idx]['gt']))\n raw_mask = np.where(raw_mask == 255, 1, 0)\n\n return np.expand_dims(raw_mask, 0) if add_dims else raw_mask\n\n def __getitem__(self, idx):\n\n x = torch.tensor(self.open_as_array(idx, invert=self.pytorch, include_nir=True), dtype=torch.float32)\n y = torch.tensor(self.open_mask(idx, add_dims=False), dtype=torch.torch.int64)\n\n return x, y\n\n def open_as_pil(self, idx):\n\n arr = 256 * self.open_as_array(idx)\n\n return Image.fromarray(arr.astype(np.uint8), 'RGB')\n\n def __repr__(self):\n s = 'Dataset class with {} files'.format(self.__len__())\n\n return s\n\n ########## Dataclass for segmentation ###########\n base_path = Path(train_dir) #### NEW CHANGE FOR SAGE\n data = CloudDataset(base_path / 'train_red',\n base_path / 'train_green',\n base_path / 'train_blue',\n base_path / 'train_nir',\n base_path / 'train_gt')\n\n ####### Split the data ########\n train_ds, valid_ds = torch.utils.data.random_split(data, (6000, 2400))\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=True)\n\n\n ################ Initialise Model ###############\n num_classes = 2 # assuming cloud and non cloud\n num_channels = 4 # for the cloud data, for now\n model = SegNet(num_classes, 
n_init_features=num_channels)\n model = model.to(device)\n \n \n \n if is_distributed and use_cuda:\n # multi-machine multi-gpu case\n model = torch.nn.parallel.DistributedDataParallel(model)\n else:\n # single-machine multi-gpu case or single-machine or multi-machine cpu case\n model = torch.nn.DataParallel(model)\n\n optimizer = optim.Adam(model.parameters(), lr=learning_rate) # using this becuase SmokeNet did\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n\n ################ Model training ##############\n\n def train(model, train_dl, valid_dl, loss_fn, optimizer, scheduler, acc_fn, epochs=1):\n start = time.time()\n\n train_loss, valid_loss = [], []\n\n best_acc = 0.0\n\n for epoch in range(epochs):\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n print('-' * 10)\n\n for phase in ['train', 'valid']:\n if phase == 'train':\n model.train(True) # Set trainind mode = true\n dataloader = train_dl\n else:\n model.train(False) # Set model to evaluate mode\n dataloader = valid_dl\n\n running_loss = 0.0\n running_acc = 0.0\n\n step = 0\n\n # iterate over data\n for x, y in dataloader:\n x = x.to(device)\n y = y.to(device)\n \n step += 1\n\n # forward pass\n if phase == 'train':\n # zero the gradients\n optimizer.zero_grad()\n outputs = model(x)\n loss = loss_fn(outputs, y)\n\n # the backward pass frees the graph memory, so there is no\n # need for torch.no_grad in this training pass\n loss.backward()\n if is_distributed and not use_cuda:\n # average gradients manually for multi-machine cpu case only\n _average_gradients(model)\n optimizer.step()\n scheduler.step()\n\n else:\n with torch.no_grad():\n outputs = model(x)\n loss = loss_fn(outputs, y.long())\n\n # stats - whatever is the phase\n acc = acc_fn(outputs, y)\n\n running_acc += acc * dataloader.batch_size\n running_loss += loss * dataloader.batch_size\n\n if step % 100 == 0:\n # clear_output(wait=True)\n print('Current step: {} Loss: {} Acc: {} AllocMem (Mb): {}'.format(step, loss, acc,\n torch.cuda.memory_allocated() / 1024 / 1024))\n # print(torch.cuda.memory_summary())\n\n epoch_loss = running_loss / len(dataloader.dataset)\n epoch_acc = running_acc / len(dataloader.dataset)\n\n clear_output(wait=True)\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n print('-' * 10)\n print('{} Loss: {:.4f} Acc: {}'.format(phase, epoch_loss, epoch_acc))\n print('-' * 10)\n\n train_loss.append(epoch_loss) if phase == 'train' else valid_loss.append(epoch_loss)\n\n time_elapsed = time.time() - start\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\n return train_loss, valid_loss\n\n\n def acc_metric(predb, yb):\n return (predb.argmax(dim=1) == yb.cuda()).float().mean()\n \n '''\n if torch.cuda.is_available():\n model.cuda()\n\n if torch.cuda.device_count() > 1:\n print(\"Using\", torch.cuda.device_count(), \"GPUs!\")\n # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs\n model = nn.DataParallel(model)\n '''\n \n \n ####### train\n loss_fn = nn.CrossEntropyLoss()\n opt = torch.optim.Adam(model.parameters(), lr=learning_rate)\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n train_loss, valid_loss = train(model, train_dl, valid_dl, loss_fn, opt,exp_lr_scheduler, acc_metric, epochs=epochs)\n\n\n ############################## Save model ####################################\n # ... 
train `model`, then save it to `model_dir`\n with open(os.path.join(args.model_dir, 'model.pth'), 'wb') as f:\n #logger.info(\"Saving the model.\")\n torch.save(model.cpu().state_dict(), f)\n\n\n\n\n\n\n\n\n", "sub_path": "Segmentation/Sagemaker Training/CloudSegNet/segnet1_train_script.py", "file_name": "segnet1_train_script.py", "file_ext": "py", "file_size_in_byte": 18178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torch.nn.Module", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 152, "usage_type": "name"}, 
{"api_name": "torch.nn.Dropout", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_unpool2d", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 159, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 166, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 176, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 191, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 201, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 203, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 203, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 212, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 217, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 219, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 261, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 270, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 286, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 286, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 287, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 287, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 288, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 288, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 292, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 292, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.iinfo", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 303, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 306, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 310, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.torch", "line_number": 311, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 319, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 319, "usage_type": "name"}, 
{"api_name": "numpy.uint8", "line_number": 319, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.utils.data.random_split", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 335, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 350, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 350, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 353, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 353, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 355, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 355, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 356, "usage_type": "name"}, {"api_name": "time.time", "line_number": 361, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 408, "usage_type": "call"}, {"api_name": "torch.cuda.memory_allocated", "line_number": 421, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 421, "usage_type": "attribute"}, {"api_name": "IPython.display.clear_output", "line_number": 427, "usage_type": "call"}, {"api_name": "time.time", "line_number": 435, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 456, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 456, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 457, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 457, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 458, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 458, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path", "line_number": 464, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 466, "usage_type": "call"}]} +{"seq_id": "320268770", "text": "\"\"\"\nALFF\n\nThe purpose of the following script is to compute ALFF and fALFF for a resting-state time series. \nThe time series is baseline corrected and nuisance regressors are taken either from mean WM and\nCSF masks or based on biopac recordings. Optionally, no nuisance regression is performed.\n\nBefore running the script, login to queen via ssh and set the freesurfer, ANTS and AFNI environments \nby calling FREESURFER, ANTSENV and AFNI in the terminal.\n\ncreated by Daniel Haenelt\nDate created: 02-03-2019\nLast modified: 15-04-2020\n\"\"\"\nimport os\nimport nibabel as nb\nfrom lib.preprocessing.get_nuisance_mask import get_nuisance_mask\nfrom lib.preprocessing.get_nuisance_regressor import get_nuisance_regressor\nfrom lib.processing.get_alff import get_alff\n\n# input\nanatomy = \"\" # T1w full brain anatomy (e.g. 
orig)\nfunction = \"/data/pt_01983/func/resting_state2/uadata.nii\" # baseline uncorrected\ndeformation = \"\" # deformation ana -> epi\nbiopac_input = \"\" # *.mat file\npath_output = \"/data/pt_01983/func/resting_state2/alff/native\"\n\n# add path\npathSPM = \"/data/pt_01880/source/spm12\"\npathLIB = \"/data/hu_haenelt/projects/scripts/lib/preprocessing\"\n\n# parameters\nTR = 3 # repetition time in s\ncutoff_highpass = 180 # cutoff frequency for baseline correction in 1/Hz\nnerode_wm = 1 # number of wm mask eroding iterations\nnerode_csf = 1 # number of csf mask eroding iterations\nhp_freq = 0.01 # highpass cutoff frequency (bandpass filter) in Hz\nlp_freq = 0.08 # lowpass cutoff frequency (bandpass filter) in Hz\n\n# analysis type\nnuisance_regression = False\nsegmentation = False\nbiopac = False\ncleanup = True\n\n\"\"\" do not edit below \"\"\"\n\n# make output folder\nif not os.path.exists(path_output):\n os.makedirs(path_output)\n\n# get path and filenames\npath = os.path.dirname(function)\nfile = os.path.basename(function)\nbfile = \"b\" + file # filename of baseline corrected time series\nrfile = \"r\" + file # filename of residual time series\n\n# physiological noise regression\nif nuisance_regression:\n \n # baseline correction\n previous_cwd = os.getcwd()\n os.chdir(pathLIB)\n os.system(\"matlab\" + \\\n \" -nodisplay -nodesktop -r \" + \\\n \"\\\"baseline_correction(\\'{0}\\', {1}, {2}, \\'{3}\\'); exit;\\\"\". \\\n format(function, TR, cutoff_highpass, pathSPM))\n os.chdir(previous_cwd)\n\n if biopac:\n \n # get biopac regressors\n previous_cwd = os.getcwd()\n os.chdir(pathLIB)\n os.system(\"matlab\" + \\\n \" -nodisplay -nodesktop -r \" + \\\n \"\\\"get_biopac_regressor(\\'{0}\\', \\'{1}\\', \\'{2}\\', \\'{3}\\', {4}); exit;\\\"\". \\\n format(os.path.join(path,bfile), biopac_input, pathSPM, path_output, TR))\n os.chdir(previous_cwd)\n \n else:\n\n # get wm and csf mask\n get_nuisance_mask(anatomy, pathSPM, deformation, path_output, \n nerode_wm, nerode_csf, segmentation, cleanup)\n \n # set mask to zero where function is equal to zero\n func_array = nb.load(function).get_fdata()\n func_array = func_array[:,:,:,0]\n \n wm = nb.load(os.path.join(path_output,\"wm_mask.nii.gz\"))\n wm_array = wm.get_fdata()\n wm_array[func_array == 0] = 0\n output = nb.Nifti1Image(wm_array, wm.affine, wm.header)\n nb.save(output,os.path.join(path_output,\"wm_mask.nii.gz\"))\n\n csf = nb.load(os.path.join(path_output,\"csf_mask.nii.gz\"))\n csf_array = csf.get_fdata()\n csf_array[func_array == 0] = 0\n output = nb.Nifti1Image(csf_array, csf.affine, csf.header)\n nb.save(output,os.path.join(path_output,\"csf_mask.nii.gz\"))\n\n # get nuisance regressor\n os.chdir(previous_cwd) # change to existing path because of cleanup\n get_nuisance_regressor(os.path.join(path,bfile), \n os.path.join(path_output,\"wm_mask.nii.gz\"), \n os.path.join(path_output,\"csf_mask.nii.gz\"), \n path_output)\n \n # nuisance regression\n if cleanup:\n clean_glm = 1\n else:\n clean_glm = 0\n\n previous_cwd = os.getcwd()\n os.chdir(pathLIB)\n os.system(\"matlab\" + \\\n \" -nodisplay -nodesktop -r \" + \\\n \"\\\"regress_physio(\\'{0}\\', \\'{1}\\', \\'{2}\\', {3}, {4}, \\'{5}\\', {6}); exit;\\\"\". 
\\\n format(function, \n os.path.join(path_output,\"nuisance_regressor.txt\"), \n pathSPM, TR, cutoff_highpass, path_output, clean_glm))\n os.chdir(previous_cwd)\n\n # get alff \n get_alff(os.path.join(path_output,rfile), TR, path_output, hp_freq, lp_freq, cleanup)\n \n # remove baseline corrected time series\n if cleanup:\n os.remove(os.path.join(path,bfile))\n\nelse:\n\n # get alff\n get_alff(function, TR, path_output, hp_freq, lp_freq, cleanup)\n", "sub_path": "processing/alff.py", "file_name": "alff.py", "file_ext": "py", "file_size_in_byte": 4833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.exists", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 62, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 63, "usage_type": "call"}, {"api_name": "os.system", "line_number": 64, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 68, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 73, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.system", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 79, "usage_type": "call"}, {"api_name": "lib.preprocessing.get_nuisance_mask.get_nuisance_mask", "line_number": 84, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 88, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "nibabel.Nifti1Image", "line_number": 94, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "nibabel.load", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "nibabel.Nifti1Image", "line_number": 100, "usage_type": "call"}, {"api_name": "nibabel.save", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 104, "usage_type": "call"}, {"api_name": "lib.preprocessing.get_nuisance_regressor.get_nuisance_regressor", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 116, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 117, "usage_type": "call"}, {"api_name": "os.system", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 124, "usage_type": "call"}, {"api_name": "lib.processing.get_alff.get_alff", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "lib.processing.get_alff.get_alff", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "351701985", "text": "\"\"\"Creates new folder in the workpsace called Quick-Looks,\nReads in the tiff files, uses matplotlib to create PNGs of the rasters\"\"\"\n\nimport gdal\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main(workspace):\n names = [item.split('.tif')[0] for item in os.listdir(workspace) if item.endswith('.tif')]\n png_folder = os.path.join(workspace, 'Quick-Looks')\n if not os.path.exists(png_folder):\n os.mkdir(png_folder)\n\n for item in names:\n raster_filename = os.path.join(workspace, str(item + '.tif'))\n print('Opening:\\n {0}'.format(raster_filename))\n png_filename = os.path.join(png_folder, str(item + '.png'))\n title = item\n\n layer = gdal.Open(raster_filename)\n band = layer.GetRasterBand(1)\n array = band.ReadAsArray()\n array[array == array[0]] = np.nan\n\n plt.imshow(array, cmap='hot')\n plt.colorbar(orientation='horizontal', cmap='hot')\n frame = plt.gca()\n frame.axes.xaxis.set_ticklabels([])\n frame.axes.yaxis.set_ticklabels([])\n plt.suptitle(title)\n plt.savefig(png_filename, bbox_inches='tight')\n print(' Wrote:\\n {0}\\n'.format(png_filename))\n plt.close('all')\n\n print('\\n\\nFINISHED!')\n\nif __name__ == '__main__':\n main('/home/vitale232/Dropbox/UNR/UNR-Thesis/Data/Temperature-Maps/Tmn/tmn_mod')", "sub_path": "quick-looks.py", "file_name": "quick-looks.py", "file_ext": "py", "file_size_in_byte": 1362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gdal.Open", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "15121741", "text": "from django import forms\nfrom mycore.models import Tenant, Room, Journal\n# from django.core.exceptions import ObjectDoesNotExist\n\n\nclass TenantCreateForm(forms.Form):\n first_name = forms.CharField(required=True)\n last_name = forms.CharField(required=True)\n date_of_birth = forms.DateField(required=True)\n phone = forms.CharField(required=True)\n photo = forms.ImageField(required=False)\n notes = forms.CharField(required=False)\n\n def save_tenant(self):\n fields = {k: v for k, v in self.data.items()\n if not k.endswith('token') and v}\n tenant = Tenant(**fields)\n tenant.save()\n\n\nclass TenantSearchForm(forms.Form):\n first_name = forms.CharField(required=True)\n last_name = forms.CharField(required=True)\n date_of_birth = forms.DateField(required=False)\n phone = forms.CharField(required=False)\n\n def find_tenant(self):\n filter_keys = {k: v for k, v in self.data.items()\n if not k.endswith('token') and v}\n tenant = Tenant.objects.filter(**filter_keys).all()\n return tenant\n\n\nclass RoomCreateForm(forms.Form):\n number = forms.IntegerField(required=True)\n max_guests = forms.IntegerField(required=False)\n\n def save_room(self):\n fields = {k: v for k, v in self.data.items()\n if not k.endswith('token') and v}\n room = Room(**fields)\n room.save()\n\n\nclass RoomSearchForm(forms.Form):\n number = forms.IntegerField(required=False)\n status = forms.CharField(required=False)\n\n def find_room(self):\n filter_keys = {k: v for k, v in self.data.items()\n if v and not k.endswith('token')}\n room = Room.objects.filter(**filter_keys).all()\n return room\n\n\nclass JournalUpdateForm(forms.Form):\n tenant_first_name = forms.CharField()\n tenant_last_name = forms.CharField()\n room_number = forms.IntegerField()\n guests = forms.IntegerField(required=False)\n comments = forms.CharField(required=False)\n clear_the_room = forms.BooleanField(required=False)\n\n def save_journal(self):\n tenant = Tenant.objects.get(first_name=self.data['tenant_first_name'],\n last_name=self.data['tenant_last_name'])\n room = Room.objects.get(number=self.data['room_number'])\n if self.data.get('clear_the_room'):\n journal = Journal.objects.filter(\n room__number=room.number,\n tenant__first_name=tenant.first_name,\n tenant__last_name=tenant.last_name).all()\n if len(journal) == 1:\n journal = journal[0]\n journal.tenant = None\n journal.save()\n elif len(journal) == 0:\n raise ValueError(\"You can't clear an empty room\")\n else:\n raise ValueError(\n \"It seems you have provided wrong options for update!\")\n else:\n journal = Journal(room=room,\n tenant=tenant,\n guests=int(self.data.get('guests', 0)),\n 
comments=self.data.get('comments'))\n journal.save()\n\n\nclass JournalSearchForm(forms.Form):\n tenant__first_name = forms.CharField(required=False)\n tenant__last_name = forms.CharField(required=False)\n room__number = forms.IntegerField(required=False)\n\n def find_journal(self):\n filter_keys = {k: v for k, v in self.data.items()\n if v and not k.endswith('token')}\n journal = Journal.objects.filter(**filter_keys).all()\n return journal\n", "sub_path": "app/concierge/mycore/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.forms.Form", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.ImageField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "mycore.models.Tenant", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.DateField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "mycore.models.Tenant.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "mycore.models.Tenant.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mycore.models.Tenant", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 36, "usage_type": "name"}, {"api_name": "mycore.models.Room", "line_number": 41, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 45, "usage_type": "name"}, {"api_name": 
"django.forms.IntegerField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 46, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 47, "usage_type": "name"}, {"api_name": "mycore.models.Room.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "mycore.models.Room.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mycore.models.Room", "line_number": 52, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 56, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 57, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 58, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 59, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 60, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 61, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "mycore.models.Tenant.objects.get", "line_number": 65, "usage_type": "call"}, {"api_name": "mycore.models.Tenant.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "mycore.models.Tenant", "line_number": 65, "usage_type": "name"}, {"api_name": "mycore.models.Room.objects.get", "line_number": 67, "usage_type": "call"}, {"api_name": "mycore.models.Room.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "mycore.models.Room", "line_number": 67, "usage_type": "name"}, {"api_name": "mycore.models.Journal.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "mycore.models.Journal.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mycore.models.Journal", "line_number": 69, "usage_type": "name"}, {"api_name": "mycore.models.Journal", "line_number": 83, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 90, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 91, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 92, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 93, "usage_type": "name"}, {"api_name": "mycore.models.Journal.objects.filter", "line_number": 98, "usage_type": "call"}, {"api_name": "mycore.models.Journal.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "mycore.models.Journal", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "156244606", "text": "#\n# operations.py - OCI Operations\n#\n# coding: utf-8\n# Copyright (c) 2016, 2018, Oracle and/or its affiliates. 
All rights reserved.\n#\n# Maintainer: David Ryder\n#\nimport sys\nimport json\nimport oci\nimport utilities\n\nclass Operations:\n def __init__(self, cp, bs, vn ):\n self.s = None # Session\n self.shapes = None\n self.auth = None\n self.valid = False\n self.cp = cp\n self.bs = bs\n self.vn = vn\n\n def attachVolume( self, compartment_id, instanceName, volumeName, retries=5 ):\n il = self.cp.list_instances( compartment_id ).data\n vl = self.bs.list_volumes( compartment_id ).data\n instance = jsonListFind( il, \"display_name\", instanceName )\n volume = jsonListFind( vl, \"display_name\", volumeName )\n data = oci.core.models.AttachVolumeDetails( )\n data.display_name = instanceName\n data.instance_id = instance.id\n data.type = \"iscsi\"\n data.volume_id = volume.id\n print( data )\n cp.attach_volume( data )\n\n def detachVolume( self, compartment_id, volumeName ):\n volList = self.bs.list_volumes( compartment_id ).data\n #print( volumeName, volList )\n volAttachements = self.cp.list_volume_attachments( compartment_id ).data\n vol = jsonListFind( volList, \"display_name\", volumeName ) # find the volume\n print( \"VOL \", vol )\n va = jsonListFind( volAttachements, \"volume_id\", vol.id ) # find the volume attachment id\n volumeId = va.id # id of the volume attachment\n cp.detach_volume( volumeId )\n\n def listBootVolumes( self, compartment_id, availability_domain ):\n print( compartment_id, availability_domain )\n resource_path = \"/bootVolumes/\"\n method = \"GET\"\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n query_params = {\n \"availabilityDomain\": availability_domain,\n \"compartmentId\": compartment_id\n }\n return cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Instance]\")\n\n def listBootVolumeAttachments( self, compartment_id, availability_domain ):\n resource_path = \"/bootVolumeAttachments/\"\n method = \"GET\"\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n query_params = {\n \"availabilityDomain\": availability_domain,\n \"compartmentId\": compartment_id\n }\n return self.cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[Instance]\")\n\n def launchInstance( self, launch_instance_details ):\n resource_path = \"/instances/\"\n method = \"POST\"\n header_params = { \"accept\": \"application/json\", \"content-type\": \"application/json\" }\n return cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=launch_instance_details,\n response_type=\"Instance\")\n\n def terminateInstance( self, compartment_id, instanceName, preserveBootVolume=True ):\n instance = jsonListFind( cp.list_instances( compartment_id ).data, \"display_name\", instanceName )\n self.cp.update_instance( instance.id, { \"displayName\": \"X_\" + instanceName })\n print( \"Terminating: \", instance.display_name, \"...\"+instance.id[-6:])\n resource_path = \"/instances/{instanceId}\".format( instanceId=instance.id )\n method = method = \"DELETE\"\n path_params = { \"instanceId\": instance.id }\n query_params = { \"preserveBootVolume\": preserveBootVolume }\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": \"\" }\n\n r1 = self.cp.base_client.call_api(\n 
resource_path=resource_path,\n method=method,\n query_params=query_params,\n path_params=path_params,\n header_params=header_params )\n print( r1 )\n\n def bootVolumeUpdate( self, compartment_id, availability_domain, bootVolumeName, bootVolumeNewName ):\n bootVolumes = oci_ListBootVolumes( compartment_id, availability_domain ).data\n bootVolume = jsonListFind( bootVolumes, \"display_name\", bootVolumeName, exact = False )\n print( \"Updating \", \"...\"+bootVolume.id[-6:], bootVolumeNewName )\n resource_path = \"/bootVolumes/{bootVolumeId}\".format( bootVolumeId=bootVolume.id )\n method = \"PUT\"\n header_params = { \"accept\": \"application/json\", \"content-type\": \"application/json\" }\n data = { \"displayName\": bootVolumeNewName }\n return cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=data,\n response_type=\"Instance\")\n\n def bootVolumeAttach( self, compartment_id, instanceName, bootVolumeName ):\n #availability_domain = config[\"availability_domain\"] # \"Rmpq:PHX-AD-1\"\n #compartment_id = config[\"compartment_id\"]\n instance = jsonListFind( cp.list_instances( compartment_id ).data, \"display_name\", instanceName )\n bootVolumes = oci_ListBootVolumes( availability_domain, compartment_id ).data\n bootVolume = jsonListFind( bootVolumes, \"display_name\", bootVolumeName, exact = False )\n print( bootVolume, instance )\n print( \"Attaching \", \"...\"+bootVolume.id[-6:], \" To Instance \", \"...\"+instance.id[-6:] )\n resource_path = \"/bootVolumeAttachments/\"\n method = \"POST\"\n header_params = { \"accept\": \"application/json\", \"content-type\": \"application/json\" }\n data = { \"bootVolumeId\": bootVolume.id,\n \"instanceId\": instance.id,\n \"displayName\": bootVolumeName\n }\n return cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n body=data,\n response_type=\"Instance\")\n\n def bootVolumeDetach( self, compartment_id, availability_domain, instanceName, bootVolumeName ):\n # detach the boot volume from the instance\n # instance must be stopped\n instances = self.cp.list_instances( compartment_id ).data\n bootVolumeAttachments = oci_ListBootVolumeAttachments( compartment_id, availability_domain ).data\n instance = jsonListFind( instances, \"display_name\", instanceName, exact = True )\n #print( instance )\n bootVolumeAttachment = jsonListFind( bootVolumeAttachments, \"id\", instance.id, exact = True )\n print( \"BV ID\", bootVolumeAttachment )\n print( \"Detatching Boot Volume \", instance.display_name, bootVolumeAttachment.id )\n resource_path = \"/bootVolumeAttachments/{bootVolumeAttachmentId}\".format( bootVolumeAttachmentId=bootVolumeAttachment.id )\n method = \"DELETE\"\n path_params = None\n query_params = None\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": \"\" }\n\n r1 = self.cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n path_params=path_params,\n header_params=header_params)\n print( r1 )\n\n def bootVolumeDelete( self, compartment_id, availability_domain, bootVolumeName ):\n bootVolumes = oci_ListBootVolumes( compartment_id, availability_domain ).data\n bootVolume = jsonListFind( bootVolumes, \"display_name\", bootVolumeName, exact = False )\n print( \"Deleting Boot Volume \", bootVolume.display_name, bootVolume.id )\n resource_path = \"/bootVolumes/{bootVolumeAttachmentId}\".format( bootVolumeAttachmentId=bootVolume.id 
)\n method = \"DELETE\"\n path_params = None\n query_params = None\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"if-match\": \"\" }\n\n r1 = self.cp.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n path_params=path_params,\n header_params=header_params)\n print( r1.message )\n", "sub_path": "operations.py", "file_name": "operations.py", "file_ext": "py", "file_size_in_byte": 8604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "oci.core.models.AttachVolumeDetails", "line_number": 29, "usage_type": "call"}, {"api_name": "oci.core", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "275278890", "text": "\"\"\"\nAuthor: Tomasz Hachaj, 2021\nDepartment of Signal Processing and Pattern Recognition\nInstitute of Computer Science in Pedagogical University of Krakow, Poland\nhttps://sppr.up.krakow.pl/hachaj/\nData source:\n\nhttps://drive.google.com/file/d/13VIyqFNzQ6zIGmWll9tEHjOdXp5R2GZt/view\nhttps://drive.google.com/file/d/1U8bwYA8PgNuNYQnv5TNtR2az3AleyrEZ/view\nhttps://drive.google.com/file/d/1h5udf2tB64q6-N3lEh0vDhvfIyDOD43N/view\n\"\"\"\n\nimport numpy as np\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.nasnet import preprocess_input\nimport tensorflow as tf\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Conv2DTranspose\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Concatenate\nimport os\nfrom keras.callbacks import CSVLogger\nfrom keras.callbacks import LearningRateScheduler\nfrom tensorflow.python.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import binary_crossentropy\n\n#Run this code 14-teen times changing stimulus_id from 0 to 13\nstimulus_id = 13\n\nstimulus = ['ALG_1_v1_Page_1.jpg', 'ALG_1_v2_Page_1.jpg', 'ALG_2_v1_Page_1.jpg', 'ALG_2_v2_Page_1.jpg', 'BIO_Page_1.jpg',\n 'FIZ_WB1_Page_1.jpg', 'FIZ_WB2.jpg', 'FIZ_WB3_v1_Page_1.jpg', 'FIZ_WB3_v2_Page_1.jpg', 'FIZ_WB4_stereo_Page_1.jpg',\n 'FIZ_WZORY_Page_1.jpg', 'rz 1_Page_1.jpg', 'rz 2_Page_1.jpg', 'rz 3_Page_1.jpg']\n\ndef make_dir_with_check(my_path):\n try:\n os.mkdir(my_path)\n except OSError:\n print(my_path + ' exists')\n else:\n print(my_path + ' created')\n\nmake_dir_with_check('data')\nmake_dir_with_check('res')\nmake_dir_with_check('checkpoint_2pyramids')\nfor stim_name in stimulus:\n make_dir_with_check('res/' + stim_name)\n make_dir_with_check('checkpoint_2pyramids/' + stim_name)\n\ndef enable_tensorflow():\n #Enables tensorflow on GPU\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n for physical_device in physical_devices:\n tf.config.experimental.set_memory_growth(physical_device, True)\n return physical_devices\n\nphysical_devices = enable_tensorflow()\ntf.compat.v1.disable_eager_execution()\n\nmy_model = VGG16(weights='imagenet', include_top=False)\nfor layer in my_model.layers[:]:\n layer.trainable = False\n\nX = np.load('data/students.np.npy')\nY = np.load('data/students_map_gray.np.npy')\n\nimport csv\nstimulus_X = []\nwith open('data/students_stimulus.txt', newline='') as f:\n reader = csv.reader(f)\n stimulus_X = list(reader)\n\n\n#string_to_find = 
stimulus[stimulus_id]\nprint(my_model.summary())\n\n\nextractor5 = Model(inputs=my_model.inputs,\n outputs=my_model.get_layer(\"block5_pool\").output)\n\nextractor4 = Model(inputs=my_model.inputs,\n outputs=my_model.get_layer(\"block4_pool\").output)\n\nextractor3 = Model(inputs=my_model.inputs,\n outputs=my_model.get_layer(\"block3_pool\").output)\n\n\nxx = np.expand_dims(X[0], axis=0)\nxx = preprocess_input(xx)\n\n# build the encoder models\nencoder5 = Model(extractor5.input, extractor5.output, name=\"encoder\")\nencoder4 = Model(extractor4.input, extractor4.output, name=\"encoder\")\nencoder3 = Model(extractor3.input, extractor3.output, name=\"encoder\")\n\ndef decoder5():\n filters = (64, 128, 256, 512, 512)\n chanDim = -1\n depth_out = 1\n\n dec = encoder5.predict(xx)\n print('decoder5')\n print(dec.shape)\n\n x = my_model.get_layer(\"block5_pool\").output\n for f in filters[::-1]:\n # apply a CONV_TRANSPOSE => RELU => BN operation\n x = Conv2DTranspose(f, (3, 3), strides=2,\n padding=\"same\")(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = BatchNormalization(axis=chanDim)(x)\n\n x = Conv2DTranspose(depth_out, (3, 3), padding=\"same\", name='decoder51_out')(x)\n outputs = Activation(\"sigmoid\", name='decoder5_out')(x)\n decoder = Model(my_model.inputs, outputs, name=\"decoder\")\n return decoder\n\ndef decoder4():\n filters = (64, 128, 256, 512)\n chanDim = -1\n depth_out = 1\n\n dec = encoder4.predict(xx)\n print('decoder4')\n print(dec.shape)\n\n x = my_model.get_layer(\"block4_pool\").output\n for f in filters[::-1]:\n # apply a CONV_TRANSPOSE => RELU => BN operation\n x = Conv2DTranspose(f, (3, 3), strides=2,\n padding=\"same\")(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = BatchNormalization(axis=chanDim)(x)\n\n x = Conv2DTranspose(depth_out, (3, 3), padding=\"same\", name='decoder41_out')(x)\n outputs = Activation(\"sigmoid\", name='decoder4_out')(x)\n\n decoder = Model(my_model.inputs, outputs, name=\"decoder\")\n return decoder\n\ndef decoder3():\n filters = (64, 128, 256)\n chanDim = -1\n depth_out = 1\n\n dec = encoder3.predict(xx)\n print('decoder3')\n print(dec.shape)\n\n x = my_model.get_layer(\"block3_pool\").output\n for f in filters[::-1]:\n # apply a CONV_TRANSPOSE => RELU => BN operation\n x = Conv2DTranspose(f, (3, 3), strides=2,\n padding=\"same\")(x)\n x = LeakyReLU(alpha=0.2)(x)\n x = BatchNormalization(axis=chanDim)(x)\n\n x = Conv2DTranspose(depth_out, (3, 3), padding=\"same\", name='decoder31_out')(x)\n outputs = Activation(\"sigmoid\", name='decoder3_out')(x)\n\n decoder = Model(my_model.inputs, outputs, name=\"decoder\")\n return decoder\n\n\n\ndef decoder45():\n attention = Concatenate()([d4.output, d5.output])\n x = Conv2D(1, (3, 3), strides=1, padding=\"same\")(attention)\n x = LeakyReLU(alpha=0.2)(x)\n x = BatchNormalization(axis=-1)(x)\n decoder = Model(my_model.inputs, x, name=\"decoder\")\n return decoder\n\n\nd5 = decoder5()\nd4 = decoder4()\nd3 = decoder3()\nd345 = decoder45()\nprint(d345.summary())\n\nvv = d345.predict(xx)\nprint(vv.shape)\n\nstrings = sum(stimulus_X, [])\nsubstring = stimulus[stimulus_id]\nindices_train = [i for i, s in enumerate(strings) if substring not in s]\nindices_test = [i for i, s in enumerate(strings) if substring in s]\nprint(len(indices_train))\nprint(len(indices_test))\n\nprint(len(strings))\nX_train = X[indices_train, :, :, :]\nX_test = X[indices_test, :, :, :]\n\nY_train = Y[indices_train, :, :, np.newaxis]\nY_test = Y[indices_test, :, :, np.newaxis]\n\nX_train = X_train.astype(\"float32\") / 255.0\nY_train = 
Y_train.astype(\"float32\")\n\nX_test = X_test.astype(\"float32\") / 255.0\nY_test = Y_test.astype(\"float32\")\n\n\nX_2 = np.load('data/images.np.npy')\nY_2 = np.load('data/salMap_gray.np.npy')\nX_2 = X_2[:, :, :, :]\nY_2 = Y_2[:, :, :, np.newaxis]\n\nX_2 = X_2.astype(\"float32\") / 255.0\nY_2 = Y_2.astype(\"float32\")\n\nprint(X_train.shape)\nprint(X_2.shape)\nX_train = np.concatenate((X_train, X_2), axis=0)\nY_train = np.concatenate((Y_train, Y_2), axis=0)\n\n\n\n# checkpoint\nlatent_size = 512\nEPOCHS = 10\nBS = 16\n\n\npath_to_checkpoints = \"checkpoint_2pyramids/\" + stimulus[stimulus_id]\nDataFile = 'results.txt'\ncsv_logger = CSVLogger(path_to_checkpoints + \"/\" + DataFile + '.log')\n\n# checkpoint\nfilepath= path_to_checkpoints + \"/improvement-{epoch:02d}-{loss:.5f}-{val_loss:.5f}.hdf5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, mode='max', save_weights_only=True)\n\nlearning_rate_step = 10\ndef lr_scheduler(epoch, lr):\n if epoch % learning_rate_step == 0 and epoch > 1:\n lr = lr * 0.1\n print(lr)\n return lr\n\n\ncallbacks_list = [checkpoint, LearningRateScheduler(lr_scheduler, verbose=1), csv_logger]\n\n\ndef my_loss_fn(y_true, y_pred):\n squared_difference = tf.square(y_true - y_pred)\n return tf.reduce_mean(squared_difference, axis=-1) # Note the `axis=-1`\n\ndef custom_loss(y_true, y_pred):\n loss2 = binary_crossentropy(y_true, d4.output)\n loss3 = binary_crossentropy(y_true, d5.output)\n loss4 = binary_crossentropy(y_true, d345.output)\n return (loss2 + loss3 + loss4) / 3.0\n\nopt = Adam(lr=1e-2)\nd345.compile(loss=custom_loss, optimizer=opt)\n\n# train the model\nH = d345.fit(\n X_train, Y_train,\n validation_data=(X_test, Y_test),\n epochs=EPOCHS,\n batch_size=BS,\n callbacks=callbacks_list)\n", "sub_path": "encoder-decoder_train_extended_data_2pyramids.py", "file_name": "encoder-decoder_train_extended_data_2pyramids.py", "file_ext": "py", "file_size_in_byte": 8175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.mkdir", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.config.experimental.list_physical_devices", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.config.experimental.set_memory_growth", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.config", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.disable_eager_execution", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 61, "usage_type": "attribute"}, {"api_name": "keras.applications.vgg16.VGG16", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 68, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.applications.nasnet.preprocess_input", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 95, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.models.Model", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 159, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Concatenate", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 219, "usage_type": "call"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks.ModelCheckpoint", "line_number": 235, "usage_type": "call"}, {"api_name": "keras.callbacks.LearningRateScheduler", "line_number": 245, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 249, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.binary_crossentropy", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorflow.keras.losses.binary_crossentropy", "line_number": 254, "usage_type": "call"}, 
{"api_name": "tensorflow.keras.losses.binary_crossentropy", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 258, "usage_type": "call"}]} +{"seq_id": "291511217", "text": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nimport unittest\n\nfrom pants.build_graph.address import Address\nfrom pants.build_graph.build_file_aliases import BuildFileAliases, TargetMacro\nfrom pants.build_graph.mutable_build_graph import MutableBuildGraph\nfrom pants.build_graph.target import Target\n\n\nclass BuildFileAliasesTest(unittest.TestCase):\n\n class RedTarget(Target):\n pass\n\n class BlueTarget(Target):\n pass\n\n def setUp(self):\n self.target_macro_factory = TargetMacro.Factory.wrap(\n lambda ctx: ctx.create_object(self.BlueTarget,\n type_alias='jill',\n name=os.path.basename(ctx.rel_path)),\n self.BlueTarget, self.RedTarget)\n\n def test_create(self):\n self.assertEqual(BuildFileAliases(targets={},\n objects={},\n context_aware_object_factories={}),\n BuildFileAliases())\n\n targets = {'jake': Target, 'jill': self.target_macro_factory}\n self.assertEqual(BuildFileAliases(targets=targets,\n objects={},\n context_aware_object_factories={}),\n BuildFileAliases(targets=targets))\n\n objects = {'jane': 42}\n self.assertEqual(BuildFileAliases(targets={},\n objects=objects,\n context_aware_object_factories={}),\n BuildFileAliases(objects=objects))\n\n factories = {'jim': lambda ctx: 'bob'}\n self.assertEqual(BuildFileAliases(targets={},\n objects={},\n context_aware_object_factories=factories),\n BuildFileAliases(context_aware_object_factories=factories))\n\n self.assertEqual(BuildFileAliases(targets=targets,\n objects=objects,\n context_aware_object_factories={}),\n BuildFileAliases(targets=targets, objects=objects))\n\n self.assertEqual(BuildFileAliases(targets=targets,\n objects={},\n context_aware_object_factories=factories),\n BuildFileAliases(targets=targets,\n context_aware_object_factories=factories))\n\n self.assertEqual(BuildFileAliases(targets={},\n objects=objects,\n context_aware_object_factories=factories),\n BuildFileAliases(objects=objects,\n context_aware_object_factories=factories))\n\n self.assertEqual(BuildFileAliases(targets=targets,\n objects=objects,\n context_aware_object_factories=factories),\n BuildFileAliases(targets=targets,\n objects=objects,\n context_aware_object_factories=factories))\n\n def test_create_bad_targets(self):\n with self.assertRaises(TypeError):\n BuildFileAliases(targets={'fred': object()})\n\n target = Target('fred', Address.parse('a:b'), MutableBuildGraph(address_mapper=None))\n with self.assertRaises(TypeError):\n BuildFileAliases(targets={'fred': target})\n\n def test_create_bad_objects(self):\n with self.assertRaises(TypeError):\n BuildFileAliases(objects={'jane': Target})\n\n with self.assertRaises(TypeError):\n BuildFileAliases(objects={'jane': self.target_macro_factory})\n\n def test_bad_context_aware_object_factories(self):\n with self.assertRaises(TypeError):\n BuildFileAliases(context_aware_object_factories={'george': 1})\n\n def test_merge(self):\n e_factory = lambda ctx: 'e'\n f_factory = lambda ctx: 'f'\n\n first = BuildFileAliases(targets={'a': Target},\n objects={'d': 2},\n context_aware_object_factories={'e': e_factory})\n\n second = 
BuildFileAliases(targets={'b': self.target_macro_factory},\n objects={'c': 1, 'd': 42},\n context_aware_object_factories={'f': f_factory})\n\n expected = BuildFileAliases(\n # nothing to merge\n targets={'a': Target, 'b': self.target_macro_factory},\n # second overrides first\n objects={'c': 1, 'd': 42},\n # combine\n context_aware_object_factories={'e': e_factory, 'f': f_factory})\n self.assertEqual(expected, first.merge(second))\n\n def test_target_types(self):\n aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})\n self.assertEqual({'jake': Target}, aliases.target_types)\n\n def test_target_macro_factories(self):\n aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})\n self.assertEqual({'jill': self.target_macro_factory}, aliases.target_macro_factories)\n\n def test_target_types_by_alias(self):\n aliases = BuildFileAliases(targets={'jake': Target, 'jill': self.target_macro_factory})\n self.assertEqual({'jake': {Target}, 'jill': {self.BlueTarget, self.RedTarget}},\n aliases.target_types_by_alias)\n", "sub_path": "tests/python/pants_test/build_graph/test_build_file_aliases.py", "file_name": "test_build_file_aliases.py", "file_ext": "py", "file_size_in_byte": 5578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pants.build_graph.target.Target", "line_number": 19, "usage_type": "name"}, {"api_name": "pants.build_graph.target.Target", "line_number": 22, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.TargetMacro.Factory.wrap", "line_number": 26, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.TargetMacro.Factory", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pants.build_graph.build_file_aliases.TargetMacro", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 33, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 36, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 38, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 39, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 42, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 45, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 48, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 51, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 54, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 56, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 59, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 61, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 64, "usage_type": "call"}, 
{"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 67, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 70, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 73, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 76, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 82, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 84, "usage_type": "call"}, {"api_name": "pants.build_graph.address.Address.parse", "line_number": 84, "usage_type": "call"}, {"api_name": "pants.build_graph.address.Address", "line_number": 84, "usage_type": "name"}, {"api_name": "pants.build_graph.mutable_build_graph.MutableBuildGraph", "line_number": 84, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 86, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 90, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 90, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 93, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 97, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 103, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 103, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 107, "usage_type": "call"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 111, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 113, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 121, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 121, "usage_type": "name"}, {"api_name": "pants.build_graph.target.Target", "line_number": 122, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 125, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 125, "usage_type": "name"}, {"api_name": "pants.build_graph.build_file_aliases.BuildFileAliases", "line_number": 129, "usage_type": "call"}, {"api_name": "pants.build_graph.target.Target", "line_number": 129, "usage_type": "name"}, {"api_name": "pants.build_graph.target.Target", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "633455926", "text": "from django.shortcuts import render\r\n\r\nprojectss = {\r\n 'mario':{\r\n\r\n }\r\n}\r\nprojects = [\r\n {\r\n 'title':'Super Marioish',\r\n 'type':'University',\r\n 'description':'',\r\n 'thumbnail':'images/topfreegames-1280.jpg',\r\n 'media_width': 575, \r\n 'media_height': 455,\r\n 'media':[\r\n {\r\n 'url':'https://www.youtube.com/embed/amrid966XhQ',\r\n 'aspectRatio':'4by3',\r\n 'isVideo': True,\r\n },\r\n {\r\n 'url':'images/java8-1280.jpg',\r\n 'alt':'java8 not found',\r\n 'isVideo': False,\r\n },\r\n {\r\n 'url':'images/jbox2d-1280.jpg',\r\n 'alt':'jbox2d not found',\r\n 'isVideo': False,\r\n },\r\n ]\r\n },\r\n {\r\n 'title':'Doctor Hyde',\r\n 'type':'Personal',\r\n 'description':'',\r\n 
'thumbnail':'images/doctorhyde_title.png',\r\n 'media_width': 616.656,\r\n 'media_height': 346.719,\r\n 'media':[\r\n {\r\n 'url':'images/doctorhyde_title.png',\r\n 'alt':'doctorhyde_title not found',\r\n 'isVideo': False,\r\n },\r\n {\r\n 'url':'https://www.youtube.com/embed/uc-cjUKcY0Y',\r\n 'aspectRatio':'16by9',\r\n 'isVideo': True,\r\n },\r\n ]\r\n },\r\n]\r\n\r\n\r\n# Create your views here.\r\ndef home(request):\r\n context = {\r\n \"home_active\": \"active\",\r\n \"title\": \"Home\",\r\n \"projects\": projects,\r\n }\r\n return render(request, 'home.html', context)\r\n \r\ndef about(request):\r\n context = {\r\n \"about_active\": \"active\",\r\n \"title\": \"About\",\r\n }\r\n return render(request, 'about.html', context)\r\n\r\ndef supermario(request):\r\n context = {\r\n \"supermario_active\": \"active\",\r\n \"project\": projects[0],\r\n }\r\n return render(request, 'supermario.html', context)\r\n\r\ndef doctorhyde(request):\r\n context = {\r\n \"doctorhyde_active\": \"active\",\r\n \"project\": projects[1],\r\n }\r\n return render(request, 'doctorhyde.html', context)\r\n\r\n\r\n", "sub_path": "portfolio/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.shortcuts.render", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "359969501", "text": "import torch\nimport lightconvpoint.knn as nearest_neighbors\n\n\ndef batched_index_select(input, dim, index):\n index_shape = index.shape\n views = [input.shape[0]] + [\n 1 if i != dim else -1 for i in range(1, len(input.shape))\n ]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index).view(input.size(0), index_shape[1], -1)\n\n\nclass SearchQuantized:\n \"\"\"Search object for computing support points and neighborhoods with quantized\n support point search.\n\n Computes the support points and their K-nearest neighbors according to the strategy\n defined in LightConvPoint paper.\n\n # Arguments\n K: int.\n Size of the neighborhood.\n stride: int.\n Defaults to 1. Reduction factor for computing the number of support points\n (1 all input points are supoprt points).\n npoints: (optional) int.\n Defaults to None. Number of support points to be generated.\n (if used, overrides the stride)\n\n # Forward arguments\n points: 3-D torch tensor.\n The input points. Dimensions are (B, D, N) with B the batch size, D the\n dimension of the spatial space and N the number of input points.\n support_points: (optional) 3-D torch tensor.\n The support points to project features on. If not provided, use the `search`\n object of the layer to compute them.\n Dimensions are (B, D, N) with B the batch size, D the dimenstion of the\n spatial space and N the number of input points.\n\n # Returns\n support_points: 3-D torch tensor.\n The support points. 
If they were provided as an input, return the same\n tensor.\n indices: 3-D torch tensor.\n The indices of the neighboring points with respect to the support points.\n If they were provided as an input, return the same tensor.\n \"\"\"\n\n def __init__(self, K, stride=1, npoints=None, dilation=1):\n self.K = K\n self.stride = stride\n self.npoints = npoints\n self.dilation = dilation\n\n def __call__(self, points, support_points=None):\n\n search_K = self.K * self.dilation\n\n if support_points is None and self.stride == 1 and (self.npoints is None):\n support_points = points\n\n if support_points is None:\n # no support points have been given\n points = points.contiguous()\n if self.stride > 1 or self.stride == 1 and (self.npoints is None):\n support_point_number = max(1, int(points.shape[2]) // self.stride)\n else:\n support_point_number = self.npoints\n support_points_ids, indices, _ = nearest_neighbors.quantized_pick_knn(\n points.cpu().detach(), support_point_number, search_K\n )\n\n support_points_ids = support_points_ids.contiguous().long()\n indices = indices.contiguous().long()\n\n if points.is_cuda:\n indices = indices.cuda()\n support_points_ids = support_points_ids.cuda()\n support_points = batched_index_select(\n points.transpose(1, 2), dim=1, index=support_points_ids\n ).transpose(1, 2)\n\n else:\n # support points are known, only compute the knn\n indices = nearest_neighbors.knn(\n points.cpu().detach(), support_points.cpu().detach(), search_K\n )\n if points.is_cuda:\n indices = indices.cuda()\n\n if self.dilation > 1:\n indices = indices[:,:, torch.randperm(indices.size(2))]\n indices = indices[:,:,:self.K]\n\n return indices, support_points\n", "sub_path": "lcp/lightconvpoint/nn/search_quantized.py", "file_name": "search_quantized.py", "file_ext": "py", "file_size_in_byte": 3798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torch.gather", "line_number": 14, "usage_type": "call"}, {"api_name": "lightconvpoint.knn.quantized_pick_knn", "line_number": 73, "usage_type": "call"}, {"api_name": "lightconvpoint.knn", "line_number": 73, "usage_type": "name"}, {"api_name": "lightconvpoint.knn.knn", "line_number": 89, "usage_type": "call"}, {"api_name": "lightconvpoint.knn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.randperm", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "406890012", "text": "import boto3\nsession = boto3.Session(profile_name='sandbox')\nec2 = session.resource('ec2')\nkey_name = 'python_automation_key'\nkey_path = key_name + '.pem'\nkey = ec2.create_key_pair(KeyName=key_name)\nkey.key_material\nwith open(key_path, 'w') as key_file:\n key_file.write(key.key_material)\nget_ipython().run_line_magic('history', '')\n", "sub_path": "02-notifon/history.py", "file_name": "history.py", "file_ext": "py", "file_size_in_byte": 335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "boto3.Session", "line_number": 2, "usage_type": "call"}]} +{"seq_id": "189785280", "text": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\"\"\"\n=============\ndesitarget.io\n=============\n\nFunctions for reading, writing and manipulating files related to targeting.\n\"\"\"\nfrom __future__ import (absolute_import, division)\n#\nimport numpy as np\nimport fitsio\nimport os\nimport re\nfrom . 
import __version__ as desitarget_version\nimport numpy.lib.recfunctions as rfn\nimport healpy as hp\nfrom glob import glob\n\nfrom desiutil import depend\nfrom desitarget.geomask import hp_in_box, box_area, is_in_box\nfrom desitarget.geomask import hp_in_cap, cap_area, is_in_cap\nfrom desitarget.geomask import is_in_hp, nside2nside, pixarea2nside\n\n# ADM set up the DESI default logger\nfrom desiutil.log import get_logger\nlog = get_logger()\n\n# ADM this is a lookup dictionary to map RELEASE to a simpler \"North\" or \"South\".\n# ADM photometric system. This will expand with the definition of RELEASE in the\n# ADM Data Model (e.g. https://desi.lbl.gov/trac/wiki/DecamLegacy/DR4sched).\n# ADM 7999 were the dr8a test reductions, for which only 'S' surveys were processed.\nreleasedict = {3000: 'S', 4000: 'N', 5000: 'S', 6000: 'N', 7000: 'S', 7999: 'S',\n 8000: 'S', 8001: 'N'}\n\noldtscolumns = [\n 'BRICKID', 'BRICKNAME', 'OBJID', 'TYPE',\n 'RA', 'RA_IVAR', 'DEC', 'DEC_IVAR',\n 'DECAM_FLUX', 'DECAM_MW_TRANSMISSION',\n 'DECAM_FRACFLUX', 'DECAM_FLUX_IVAR', 'DECAM_NOBS', 'DECAM_DEPTH', 'DECAM_GALDEPTH',\n 'WISE_FLUX', 'WISE_MW_TRANSMISSION',\n 'WISE_FLUX_IVAR',\n 'SHAPEDEV_R', 'SHAPEDEV_E1', 'SHAPEDEV_E2',\n 'SHAPEDEV_R_IVAR', 'SHAPEDEV_E1_IVAR', 'SHAPEDEV_E2_IVAR',\n 'SHAPEEXP_R', 'SHAPEEXP_E1', 'SHAPEEXP_E2',\n 'SHAPEEXP_R_IVAR', 'SHAPEEXP_E1_IVAR', 'SHAPEEXP_E2_IVAR',\n 'DCHISQ'\n ]\n\n# ADM this is an empty array of the full TS data model columns and dtypes\n# ADM other columns can be added in read_tractor.\ntsdatamodel = np.array([], dtype=[\n ('RELEASE', '>i2'), ('BRICKID', '>i4'), ('BRICKNAME', 'S8'),\n ('OBJID', '>i4'), ('TYPE', 'S4'), ('RA', '>f8'), ('RA_IVAR', '>f4'),\n ('DEC', '>f8'), ('DEC_IVAR', '>f4'), ('DCHISQ', '>f4', (5,)), ('EBV', '>f4'),\n ('FLUX_G', '>f4'), ('FLUX_R', '>f4'), ('FLUX_Z', '>f4'),\n ('FLUX_IVAR_G', '>f4'), ('FLUX_IVAR_R', '>f4'), ('FLUX_IVAR_Z', '>f4'),\n ('MW_TRANSMISSION_G', '>f4'), ('MW_TRANSMISSION_R', '>f4'), ('MW_TRANSMISSION_Z', '>f4'),\n ('FRACFLUX_G', '>f4'), ('FRACFLUX_R', '>f4'), ('FRACFLUX_Z', '>f4'),\n ('FRACMASKED_G', '>f4'), ('FRACMASKED_R', '>f4'), ('FRACMASKED_Z', '>f4'),\n ('FRACIN_G', '>f4'), ('FRACIN_R', '>f4'), ('FRACIN_Z', '>f4'),\n ('NOBS_G', '>i2'), ('NOBS_R', '>i2'), ('NOBS_Z', '>i2'),\n ('PSFDEPTH_G', '>f4'), ('PSFDEPTH_R', '>f4'), ('PSFDEPTH_Z', '>f4'),\n ('GALDEPTH_G', '>f4'), ('GALDEPTH_R', '>f4'), ('GALDEPTH_Z', '>f4'),\n ('FLUX_W1', '>f4'), ('FLUX_W2', '>f4'), ('FLUX_W3', '>f4'), ('FLUX_W4', '>f4'),\n ('FLUX_IVAR_W1', '>f4'), ('FLUX_IVAR_W2', '>f4'),\n ('FLUX_IVAR_W3', '>f4'), ('FLUX_IVAR_W4', '>f4'),\n ('MW_TRANSMISSION_W1', '>f4'), ('MW_TRANSMISSION_W2', '>f4'),\n ('MW_TRANSMISSION_W3', '>f4'), ('MW_TRANSMISSION_W4', '>f4'),\n ('ALLMASK_G', '>i2'), ('ALLMASK_R', '>i2'), ('ALLMASK_Z', '>i2'),\n ('FRACDEV', '>f4'), ('FRACDEV_IVAR', '>f4'),\n ('SHAPEDEV_R', '>f4'), ('SHAPEDEV_E1', '>f4'), ('SHAPEDEV_E2', '>f4'),\n ('SHAPEDEV_R_IVAR', '>f4'), ('SHAPEDEV_E1_IVAR', '>f4'), ('SHAPEDEV_E2_IVAR', '>f4'),\n ('SHAPEEXP_R', '>f4'), ('SHAPEEXP_E1', '>f4'), ('SHAPEEXP_E2', '>f4'),\n ('SHAPEEXP_R_IVAR', '>f4'), ('SHAPEEXP_E1_IVAR', '>f4'), ('SHAPEEXP_E2_IVAR', '>f4')\n ])\n\ndr7datamodel = np.array([], dtype=[\n ('FIBERFLUX_G', '>f4'), ('FIBERFLUX_R', '>f4'), ('FIBERFLUX_Z', '>f4'),\n ('FIBERTOTFLUX_G', '>f4'), ('FIBERTOTFLUX_R', '>f4'), ('FIBERTOTFLUX_Z', '>f4'),\n ('BRIGHTSTARINBLOB', '?')\n ])\n\ndr8datamodel = np.array([], dtype=[\n ('FIBERFLUX_G', '>f4'), ('FIBERFLUX_R', '>f4'), ('FIBERFLUX_Z', '>f4'),\n ('FIBERTOTFLUX_G', '>f4'), 
('FIBERTOTFLUX_R', '>f4'), ('FIBERTOTFLUX_Z', '>f4'),\n ('BRIGHTBLOB', '>i2')\n ])\n\n\ndef desitarget_nside():\n \"\"\"Default HEALPix Nside for all target selection algorithms.\"\"\"\n nside = 64\n return nside\n\n\ndef desitarget_resolve_dec():\n \"\"\"Default Dec cut to separate targets in BASS/MzLS from DECaLS.\"\"\"\n dec = 32.375\n return dec\n\n\ndef convert_from_old_data_model(fx, columns=None):\n \"\"\"Read data from open Tractor/sweeps file and convert to DR4+ data model.\n\n Parameters\n ----------\n fx : :class:`str`\n Open file object corresponding to one Tractor or sweeps file.\n columns: :class:`list`, optional\n the desired Tractor catalog columns to read\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Array with the tractor schema, uppercase field names.\n\n Notes\n -----\n - Anything pre-DR3 is assumed to be DR3 (we'd already broken\n backwards-compatability with DR1 because of DECAM_DEPTH but\n this now breaks backwards-compatability with DR2)\n \"\"\"\n indata = fx[1].read(columns=columns)\n\n # ADM the number of objects in the input rec array.\n nrows = len(indata)\n\n # ADM the column names that haven't changed between the current and the old data model.\n tscolumns = list(tsdatamodel.dtype.names)\n sharedcols = list(set(tscolumns).intersection(oldtscolumns))\n\n # ADM the data types for the new data model.\n dt = tsdatamodel.dtype\n\n # ADM need to add BRICKPRIMARY and its data type, if it was passed as a column of interest.\n if ('BRICK_PRIMARY' in columns):\n sharedcols.append('BRICK_PRIMARY')\n dd = dt.descr\n dd.append(('BRICK_PRIMARY', '?'))\n dt = np.dtype(dd)\n\n # ADM create a new numpy array with the fields from the new data model...\n outdata = np.empty(nrows, dtype=dt)\n\n # ADM ...and populate them with the passed columns of data.\n for col in sharedcols:\n outdata[col] = indata[col]\n\n # ADM change the DECAM columns from the old (2-D array) to new (named 1-D array) data model.\n decamcols = ['FLUX', 'MW_TRANSMISSION', 'FRACFLUX', 'FLUX_IVAR', 'NOBS', 'GALDEPTH']\n decambands = 'UGRIZ'\n for bandnum in [1, 2, 4]:\n for colstring in decamcols:\n outdata[colstring+\"_\"+decambands[bandnum]] = indata[\"DECAM_\"+colstring][:, bandnum]\n # ADM treat DECAM_DEPTH separately as the syntax is slightly different.\n outdata[\"PSFDEPTH_\"+decambands[bandnum]] = indata[\"DECAM_DEPTH\"][:, bandnum]\n\n # ADM change the WISE columns from the old (2-D array) to new (named 1-D array) data model.\n wisecols = ['FLUX', 'MW_TRANSMISSION', 'FLUX_IVAR']\n for bandnum in [1, 2, 3, 4]:\n for colstring in wisecols:\n outdata[colstring+\"_W\"+str(bandnum)] = indata[\"WISE_\"+colstring][:, bandnum-1]\n\n # ADM we also need to include the RELEASE, which we'll always assume is DR3\n # ADM (deprecating anything from before DR3).\n outdata['RELEASE'] = 3000\n\n return outdata\n\n\ndef add_gaia_columns(indata):\n \"\"\"Add columns needed for MWS targeting to a sweeps-style array.\n\n Parameters\n ----------\n indata : :class:`~numpy.ndarray`\n Numpy structured array to which to add Gaia-relevant columns.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Input array with the Gaia columns added.\n\n Notes\n -----\n - Gaia columns resemble the data model in :mod:`desitarget.gaiamatch`\n but with \"GAIA_RA\" and \"GAIA_DEC\" removed.\n \"\"\"\n # ADM remove the Gaia coordinates from the Gaia data model as they aren't\n # ADM in the imaging surveys data model.\n from desitarget.gaiamatch import gaiadatamodel, pop_gaia_coords\n gaiadatamodel = pop_gaia_coords(gaiadatamodel)\n\n # ADM 
create the combined data model.\n dt = indata.dtype.descr + gaiadatamodel.dtype.descr\n\n # ADM create a new numpy array with the fields from the new data model...\n nrows = len(indata)\n outdata = np.zeros(nrows, dtype=dt)\n\n # ADM ...and populate them with the passed columns of data.\n for col in indata.dtype.names:\n outdata[col] = indata[col]\n\n # ADM set REF_ID to -1 to indicate nothing has a Gaia match (yet).\n outdata['REF_ID'] = -1\n\n return outdata\n\n\ndef add_dr8_columns(indata):\n \"\"\"Add columns that are in dr7/dr8 that weren't in dr6.\n\n Parameters\n ----------\n indata : :class:`~numpy.ndarray`\n Numpy structured array to which to add DR7/DR8 columns.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Input array with DR7/DR8 columns added.\n\n Notes\n -----\n - DR7 columns are stored in :mod:`desitarget.io.dr7datamodel`.\n - DR8 columns are stored in :mod:`desitarget.io.dr8datamodel`.\n - The returned columns are set to all ``0`` or ``False``.\n \"\"\"\n # ADM if BRIGHSTARINBLOB was sent (the dr7 version of BRIGHTBLOB)\n # ADM then we need to update that column.\n if 'BRIGHTSTARINBLOB' in indata.dtype.names:\n newt = dr8datamodel[\"BRIGHTBLOB\"].dtype.str\n newdt = (\"BRIGHTBLOB\", newt)\n dt = [fld if fld[0] != 'BRIGHTSTARINBLOB' else newdt\n for fld in indata.dtype.descr]\n else:\n # ADM otherwise, create the combined data model.\n dt = indata.dtype.descr + dr8datamodel.dtype.descr\n\n # ADM create a new numpy array with the fields from the new data model...\n nrows = len(indata)\n outdata = np.zeros(nrows, dtype=dt)\n\n # ADM ...and populate them with the passed columns of data.\n for col in indata.dtype.names:\n if col == \"BRIGHTSTARINBLOB\":\n outdata[\"BRIGHTBLOB\"] = indata[\"BRIGHTSTARINBLOB\"].astype(newt)\n else:\n outdata[col] = indata[col]\n\n return outdata\n\n\ndef add_photsys(indata):\n \"\"\"Add the PHOTSYS column to a sweeps-style array.\n\n Parameters\n ----------\n indata : :class:`~numpy.ndarray`\n Numpy structured array to which to add PHOTSYS column.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Input array with PHOTSYS added (and set using RELEASE).\n\n Notes\n -----\n - The PHOTSYS column is only added if the RELEASE column\n is available in the passed `indata`.\n \"\"\"\n # ADM only add the PHOTSYS column if RELEASE exists.\n if 'RELEASE' in indata.dtype.names:\n # ADM add PHOTSYS to the data model.\n pdt = [('PHOTSYS', '|S1')]\n dt = indata.dtype.descr + pdt\n\n # ADM create a new numpy array with the fields from the new data model...\n nrows = len(indata)\n outdata = np.empty(nrows, dtype=dt)\n\n # ADM ...and populate them with the passed columns of data.\n for col in indata.dtype.names:\n outdata[col] = indata[col]\n\n # ADM add the PHOTSYS column.\n photsys = release_to_photsys(indata[\"RELEASE\"])\n outdata['PHOTSYS'] = photsys\n else:\n outdata = indata\n\n return outdata\n\n\ndef read_tractor(filename, header=False, columns=None):\n \"\"\"Read a tractor catalogue file.\n\n Parameters\n ----------\n filename : :class:`str`\n File name of one Tractor or sweeps file.\n header : :class:`bool`, optional\n If ``True``, return (data, header) instead of just data.\n columns: :class:`list`, optional\n Specify the desired Tractor catalog columns to read; defaults to\n desitarget.io.tsdatamodel.dtype.names.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Array with the tractor schema, uppercase field names.\n \"\"\"\n check_fitsio_version()\n\n fx = fitsio.FITS(filename, upper=True)\n fxcolnames = fx[1].get_colnames()\n hdr = 
fx[1].read_header()\n\n if columns is None:\n readcolumns = list(tsdatamodel.dtype.names)\n # ADM if RELEASE doesn't exist, then we're pre-DR3 and need the old data model.\n if (('RELEASE' not in fxcolnames) and ('release' not in fxcolnames)):\n readcolumns = list(oldtscolumns)\n else:\n readcolumns = list(columns)\n\n # - tractor files have BRICK_PRIMARY; sweep files don't\n if (columns is None) and \\\n (('BRICK_PRIMARY' in fxcolnames) or ('brick_primary' in fxcolnames)):\n readcolumns.append('BRICK_PRIMARY')\n\n # ADM if BRIGHTSTARINBLOB exists (it does for DR7, not for DR6) add it and\n # ADM the other DR6->DR7 data model updates.\n if (columns is None) and \\\n (('BRIGHTSTARINBLOB' in fxcolnames) or ('brightstarinblob' in fxcolnames)):\n for col in dr7datamodel.dtype.names:\n readcolumns.append(col)\n # ADM if BRIGHTBLOB exists (it does for DR8, not for DR7) add it and\n # ADM the other DR6->DR8 data model updates.\n else:\n if (columns is None) and \\\n (('BRIGHTBLOB' in fxcolnames) or ('brightblob' in fxcolnames)):\n for col in dr8datamodel.dtype.names:\n readcolumns.append(col)\n\n # ADM if Gaia information was passed, add it to the columns to read.\n if (columns is None):\n if (('REF_ID' in fxcolnames) or ('ref_id' in fxcolnames)):\n # ADM remove the Gaia coordinates as they aren't in the imaging data model.\n from desitarget.gaiamatch import gaiadatamodel, pop_gaia_coords, pop_gaia_columns\n gaiadatamodel = pop_gaia_coords(gaiadatamodel)\n # ADM the DR7 sweeps don't contain these columns, but DR8 should.\n if 'REF_CAT' not in fxcolnames:\n gaiadatamodel = pop_gaia_columns(\n gaiadatamodel,\n ['REF_CAT', 'GAIA_PHOT_BP_RP_EXCESS_FACTOR',\n 'GAIA_ASTROMETRIC_SIGMA5D_MAX', 'GAIA_ASTROMETRIC_PARAMS_SOLVED']\n )\n gaiacols = gaiadatamodel.dtype.names\n readcolumns += gaiacols\n\n if (columns is None) and \\\n (('RELEASE' not in fxcolnames) and ('release' not in fxcolnames)):\n # ADM Rewrite the data completely to correspond to the DR4+ data model.\n # ADM we default to writing RELEASE = 3000 (\"DR3, or before, data\")\n data = convert_from_old_data_model(fx, columns=readcolumns)\n else:\n data = fx[1].read(columns=readcolumns)\n\n # ADM add Gaia columns if not passed.\n if (columns is None) and \\\n (('REF_ID' not in fxcolnames) and ('ref_id' not in fxcolnames)):\n data = add_gaia_columns(data)\n\n # ADM add DR8 data model updates (with zero/False) columns if not passed.\n if (columns is None) and \\\n (('BRIGHTBLOB' not in fxcolnames) and ('brightblob' not in fxcolnames)):\n data = add_dr8_columns(data)\n\n # ADM Empty (length 0) files have dtype='>f8' instead of 'S8' for brickname.\n if len(data) == 0:\n log.warning('WARNING: Empty file>', filename)\n dt = data.dtype.descr\n dt[1] = ('BRICKNAME', 'S8')\n data = data.astype(np.dtype(dt))\n\n # ADM To circumvent whitespace bugs on I/O from fitsio.\n # ADM need to strip any white space from string columns.\n for colname in data.dtype.names:\n kind = data[colname].dtype.kind\n if kind == 'U' or kind == 'S':\n data[colname] = np.char.rstrip(data[colname])\n\n # ADM add the PHOTSYS column to unambiguously check whether we're using imaging\n # ADM from the \"North\" or \"South\".\n data = add_photsys(data)\n\n if header:\n fx.close()\n return data, hdr\n else:\n fx.close()\n return data\n\n\ndef fix_tractor_dr1_dtype(objects):\n \"\"\"DR1 tractor files have inconsistent dtype for the TYPE field. 
Fix this.\n\n Args:\n objects : numpy structured array from target file.\n\n Returns:\n structured array with TYPE.dtype = 'S4' if needed.\n\n If the type was already correct, returns the original array.\n \"\"\"\n if objects['TYPE'].dtype == 'S4':\n return objects\n else:\n dt = objects.dtype.descr\n for i in range(len(dt)):\n if dt[i][0] == 'TYPE':\n dt[i] = ('TYPE', 'S4')\n break\n return objects.astype(np.dtype(dt))\n\n\ndef release_to_photsys(release):\n \"\"\"Convert RELEASE to PHOTSYS using the releasedict lookup table.\n\n Parameters\n ----------\n objects : :class:`int` or :class:`~numpy.ndarray`\n RELEASE column from a numpy rec array of targets.\n\n Returns\n -------\n :class:`str` or :class:`~numpy.ndarray`\n 'N' if the RELEASE corresponds to the northern photometric\n system (MzLS+BASS) and 'S' if it's the southern system (DECaLS).\n\n Notes\n -----\n Defaults to 'U' if the system is not recognized.\n \"\"\"\n # ADM arrays of the key (RELEASE) and value (PHOTSYS) entries in the releasedict.\n releasenums = np.array(list(releasedict.keys()))\n photstrings = np.array(list(releasedict.values()))\n\n # ADM an array with indices running from 0 to the maximum release number + 1.\n r2p = np.empty(np.max(releasenums)+1, dtype='|S1')\n\n # ADM set each entry to 'U' for an unidentified photometric system.\n r2p[:] = 'U'\n\n # ADM populate where the release numbers exist with the PHOTSYS.\n r2p[releasenums] = photstrings\n\n # ADM return the PHOTSYS string that corresponds to each passed release number.\n return r2p[release]\n\n\ndef write_targets(filename, data, indir=None, qso_selection=None,\n sandboxcuts=False, nside=None, survey=\"?\",\n nsidefile=None, hpxlist=None):\n \"\"\"Write a target catalogue.\n\n Parameters\n ----------\n filename : :class:`str`\n output target selection file.\n data : :class:`~numpy.ndarray`\n numpy structured array of targets to save.\n indir, qso_selection : :class:`str`, optional, default to `None`\n If passed, note these as the input directory and\n quasar selection method in the output file header.\n sandboxcuts : :class:`bool`, optional, defaults to ``False``\n If passed, note this whether we ran target seletion\n in the sandbox in the output file header.\n nside : :class:`int`, optional, defaults to `None`\n If passed, add a column to the targets array popluated\n with HEALPixels at resolution `nside`.\n survey : :class:`str`, optional, defaults to \"?\"\n Written to output file header as the keyword `SURVEY`.\n nsidefile : :class:`int`, optional, defaults to `None`\n Passed to indicate in the output file header that the targets\n have been limited to only certain HEALPixels at a given\n nside. Used in conjunction with `hpxlist`.\n hpxlist : :class:`list`, optional, defaults to `None`\n Passed to indicate in the output file header that the targets\n have been limited to only this list of HEALPixels. 
Used in\n conjunction with `nsidefile`.\n \"\"\"\n # FIXME: assert data and tsbits schema\n\n # ADM use RELEASE to determine the release string for the input targets.\n ntargs = len(data)\n if ntargs == 0:\n # ADM if there are no targets, then we don't know the Data Release.\n drstring = 'unknowndr'\n else:\n drint = np.max(data['RELEASE']//1000)\n drstring = 'dr'+str(drint)\n\n # - Create header to include versions, etc.\n hdr = fitsio.FITSHDR()\n depend.setdep(hdr, 'desitarget', desitarget_version)\n depend.setdep(hdr, 'desitarget-git', gitversion())\n depend.setdep(hdr, 'sandboxcuts', sandboxcuts)\n depend.setdep(hdr, 'photcat', drstring)\n\n if indir is not None:\n depend.setdep(hdr, 'tractor-files', indir)\n\n if qso_selection is None:\n log.warning('qso_selection method not specified for output file')\n depend.setdep(hdr, 'qso-selection', 'unknown')\n else:\n depend.setdep(hdr, 'qso-selection', qso_selection)\n\n # ADM add HEALPix column, if requested by input.\n if nside is not None:\n theta, phi = np.radians(90-data[\"DEC\"]), np.radians(data[\"RA\"])\n hppix = hp.ang2pix(nside, theta, phi, nest=True)\n data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)\n hdr['HPXNSIDE'] = nside\n hdr['HPXNEST'] = True\n\n # ADM populate SUBPRIORITY with a reproducible random float.\n if \"SUBPRIORITY\" in data.dtype.names:\n np.random.seed(616)\n data[\"SUBPRIORITY\"] = np.random.random(ntargs)\n\n # ADM add the type of survey (main, commissioning; or \"cmx\", sv) to the header.\n hdr[\"SURVEY\"] = survey\n\n # ADM record whether this file has been limited to only certain HEALPixels.\n if hpxlist is not None or nsidefile is not None:\n # ADM hpxlist and nsidefile need to be passed together.\n if hpxlist is None or nsidefile is None:\n msg = 'Both hpxlist (={}) and nsidefile (={}) need to be set' \\\n .format(hpxlist, nsidefile)\n log.critical(msg)\n raise ValueError(msg)\n hdr['FILENSID'] = nsidefile\n hdr['FILENEST'] = True\n hdr['FILEHPX'] = hpxlist\n\n fitsio.write(filename, data, extname='TARGETS', header=hdr, clobber=True)\n\n\ndef write_skies(filename, data, indir=None, apertures_arcsec=None,\n nskiespersqdeg=None, nside=None):\n \"\"\"Write a target catalogue of sky locations.\n\n Parameters\n ----------\n filename : :class:`str`\n Output target selection file name\n data : :class:`~numpy.ndarray`\n Array of skies to write to file.\n indir : :class:`str`, optional\n Name of input Legacy Survey Data Release directory, write to header\n of output file if passed (and if not None).\n apertures_arcsec : :class:`list` or `float`, optional\n list of aperture radii in arcseconds to write each aperture as an\n individual line in the header, if passed (and if not None).\n nskiespersqdeg : :class:`float`, optional\n Number of sky locations generated per sq. 
deg., write to header\n of output file if passed (and if not None).\n nside: :class:`int`, optional\n If passed, add a column to the skies array popluated with HEALPixels\n at resolution `nside`.\n \"\"\"\n nskies = len(data)\n\n # ADM force OBSCONDITIONS to be 65535\n # ADM (see https://github.com/desihub/desitarget/pull/313).\n data[\"OBSCONDITIONS\"] = 2**16-1\n\n # - Create header to include versions, etc.\n hdr = fitsio.FITSHDR()\n depend.setdep(hdr, 'desitarget', desitarget_version)\n depend.setdep(hdr, 'desitarget-git', gitversion())\n\n if indir is not None:\n depend.setdep(hdr, 'input-data-release', indir)\n # ADM note that if 'dr' is not in the indir DR\n # ADM directory structure, garbage will\n # ADM be rewritten gracefully in the header.\n drstring = 'dr'+indir.split('dr')[-1][0]\n depend.setdep(hdr, 'photcat', drstring)\n\n if apertures_arcsec is not None:\n for i, ap in enumerate(apertures_arcsec):\n apname = \"AP{}\".format(i)\n apsize = ap\n hdr[apname] = apsize\n\n if nskiespersqdeg is not None:\n hdr['NPERSDEG'] = nskiespersqdeg\n\n # ADM add HEALPix column, if requested by input.\n if nside is not None:\n theta, phi = np.radians(90-data[\"DEC\"]), np.radians(data[\"RA\"])\n hppix = hp.ang2pix(nside, theta, phi, nest=True)\n data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)\n hdr['HPXNSIDE'] = nside\n hdr['HPXNEST'] = True\n\n # ADM populate SUBPRIORITY with a reproducible random float.\n if \"SUBPRIORITY\" in data.dtype.names:\n np.random.seed(616)\n data[\"SUBPRIORITY\"] = np.random.random(nskies)\n\n fitsio.write(filename, data, extname='SKY_TARGETS', header=hdr, clobber=True)\n\n\ndef write_gfas(filename, data, indir=None, nside=None, survey=\"?\",\n gaiaepoch=None):\n \"\"\"Write a catalogue of Guide/Focus/Alignment targets.\n\n Parameters\n ----------\n filename : :class:`str`\n Output file name.\n data : :class:`~numpy.ndarray`\n Array of GFAs to write to file.\n indir : :class:`str`, optional, defaults to None.\n Name of input Legacy Survey Data Release directory, write to header\n of output file if passed (and if not None).\n nside: :class:`int`, defaults to None.\n If passed, add a column to the GFAs array popluated with HEALPixels\n at resolution `nside`.\n survey : :class:`str`, optional, defaults to \"?\"\n Written to output file header as the keyword `SURVEY`.\n gaiaepoch: :class:`float`, defaults to None\n Gaia proper motion reference epoch. If not None, write to header of\n output file. 
If None, default to an epoch of 2015.5.\n \"\"\"\n # ADM rename 'TYPE' to 'MORPHTYPE'.\n data = rfn.rename_fields(data, {'TYPE': 'MORPHTYPE'})\n\n # ADM create header to include versions, etc.\n hdr = fitsio.FITSHDR()\n depend.setdep(hdr, 'desitarget', desitarget_version)\n depend.setdep(hdr, 'desitarget-git', gitversion())\n\n if indir is not None:\n depend.setdep(hdr, 'input-data-release', indir)\n # ADM note that if 'dr' is not in the indir DR\n # ADM directory structure, garbage will\n # ADM be rewritten gracefully in the header.\n drstring = 'dr'+indir.split('dr')[-1][0]\n depend.setdep(hdr, 'photcat', drstring)\n\n # ADM add HEALPix column, if requested by input.\n if nside is not None:\n theta, phi = np.radians(90-data[\"DEC\"]), np.radians(data[\"RA\"])\n hppix = hp.ang2pix(nside, theta, phi, nest=True)\n data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)\n hdr['HPXNSIDE'] = nside\n hdr['HPXNEST'] = True\n\n # ADM add the type of survey (main, or commissioning \"cmx\") to the header.\n hdr[\"SURVEY\"] = survey\n\n # ADM add the Gaia reference epoch, or pass 2015.5 if not included.\n hdr['REFEPOCH'] = {'name': 'REFEPOCH',\n 'value': 2015.5,\n 'comment': \"Gaia Proper Motion Reference Epoch\"}\n if gaiaepoch is not None:\n hdr['REFEPOCH'] = gaiaepoch\n\n fitsio.write(filename, data, extname='GFA_TARGETS', header=hdr, clobber=True)\n\n\ndef write_randoms(filename, data, indir=None, hdr=None, nside=None, density=None):\n \"\"\"Write a catalogue of randoms and associated pixel-level information.\n\n Parameters\n ----------\n filename : :class:`str`\n Output file name.\n data : :class:`~numpy.ndarray`\n Array of randoms to write to file.\n indir : :class:`str`, optional, defaults to None\n Name of input Legacy Survey Data Release directory, write to header\n of output file if passed (and if not None).\n hdr : :class:`str`, optional, defaults to `None`\n If passed, use this header to start the header of the output `filename`.\n nside: :class:`int`\n If passed, add a column to the randoms array popluated with HEALPixels\n at resolution `nside`.\n density: :class:`int`\n Number of points per sq. deg. at which the catalog was generated,\n write to header of the output file if not None.\n \"\"\"\n # ADM create header to include versions, etc. 
If a `hdr` was\n # ADM passed, then use it, if not then create a new header.\n if hdr is None:\n hdr = fitsio.FITSHDR()\n depend.setdep(hdr, 'desitarget', desitarget_version)\n depend.setdep(hdr, 'desitarget-git', gitversion())\n\n if indir is not None:\n depend.setdep(hdr, 'input-data-release', indir)\n # ADM note that if 'dr' is not in the indir DR\n # ADM directory structure, garbage will\n # ADM be rewritten gracefully in the header.\n drstring = 'dr'+indir.split('dr')[-1][0]\n depend.setdep(hdr, 'photcat', drstring)\n # ADM also write the mask bits header information\n # ADM from a mask bits file in this DR.\n from glob import iglob\n files = iglob(indir+'/coadd/*/*/*maskbits*')\n # ADM we built an iterator over mask bits files for speed\n # ADM if there are no such files to iterate over, just pass.\n try:\n fn = next(files)\n mbhdr = fitsio.read_header(fn)\n # ADM extract the keys that include the string 'BITNM'.\n bncols = [key for key in mbhdr.keys() if 'BITNM' in key]\n for col in bncols:\n hdr[col] = {'name': col,\n 'value': mbhdr[col],\n 'comment': mbhdr.get_comment(col)}\n except StopIteration:\n pass\n\n # ADM add HEALPix column, if requested by input.\n if nside is not None:\n theta, phi = np.radians(90-data[\"DEC\"]), np.radians(data[\"RA\"])\n hppix = hp.ang2pix(nside, theta, phi, nest=True)\n data = rfn.append_fields(data, 'HPXPIXEL', hppix, usemask=False)\n hdr['HPXNSIDE'] = nside\n hdr['HPXNEST'] = True\n\n # ADM add density of points if requested by input.\n if density is not None:\n hdr['DENSITY'] = density\n\n fitsio.write(filename, data, extname='RANDOMS', header=hdr, clobber=True)\n\n\ndef iter_files(root, prefix, ext='fits'):\n \"\"\"Iterator over files under in `root` directory with given `prefix` and\n extension.\n \"\"\"\n if os.path.isdir(root):\n for dirpath, dirnames, filenames in os.walk(root, followlinks=True):\n for filename in filenames:\n if filename.startswith(prefix) and filename.endswith('.'+ext):\n yield os.path.join(dirpath, filename)\n else:\n filename = os.path.basename(root)\n if filename.startswith(prefix) and filename.endswith('.'+ext):\n yield root\n\n\ndef list_sweepfiles(root):\n \"\"\"Return a list of sweep files found under `root` directory.\n \"\"\"\n # ADM check for duplicate files in case the listing was run\n # ADM at too low a level in the directory structure.\n check = [os.path.basename(x) for x in iter_sweepfiles(root)]\n if len(check) != len(set(check)):\n log.error(\"Duplicate sweep files in root directory!\")\n\n return [x for x in iter_sweepfiles(root)]\n\n\ndef iter_sweepfiles(root):\n \"\"\"Iterator over all sweep files found under root directory.\n \"\"\"\n return iter_files(root, prefix='sweep', ext='fits')\n\n\ndef list_tractorfiles(root):\n \"\"\"Return a list of tractor files found under `root` directory.\n \"\"\"\n # ADM check for duplicate files in case the listing was run\n # ADM at too low a level in the directory structure.\n check = [os.path.basename(x) for x in iter_tractorfiles(root)]\n if len(check) != len(set(check)):\n log.error(\"Duplicate Tractor files in root directory!\")\n\n return [x for x in iter_tractorfiles(root)]\n\n\ndef iter_tractorfiles(root):\n \"\"\"Iterator over all tractor files found under `root` directory.\n\n Parameters\n ----------\n root : :class:`str`\n Path to start looking. 
Can be a directory or a single file.\n\n Returns\n -------\n iterable\n An iterator of (brickname, filename).\n\n Examples\n --------\n >>> for brickname, filename in iter_tractor('./'):\n >>> print(brickname, filename)\n \"\"\"\n return iter_files(root, prefix='tractor', ext='fits')\n\n\ndef brickname_from_filename(filename):\n \"\"\"Parse `filename` to check if this is a tractor brick file.\n\n Parameters\n ----------\n filename : :class:`str`\n Name of a tractor brick file.\n\n Returns\n -------\n :class:`str`\n Name of the brick in the file name.\n\n Raises\n ------\n ValueError\n If the filename does not appear to be a valid tractor brick file.\n \"\"\"\n if not filename.endswith('.fits'):\n raise ValueError(\"Invalid tractor brick file: {}!\".format(filename))\n #\n # Match filename tractor-0003p027.fits -> brickname 0003p027.\n # Also match tractor-00003p0027.fits, just in case.\n #\n match = re.search('tractor-(\\d{4,5}[pm]\\d{3,4})\\.fits',\n os.path.basename(filename))\n\n if match is None:\n raise ValueError(\"Invalid tractor brick file: {}!\".format(filename))\n return match.group(1)\n\n\ndef brickname_from_filename_with_prefix(filename, prefix=''):\n \"\"\"Parse `filename` to check if this is a brick file with a given prefix.\n\n Parameters\n ----------\n filename : :class:`str`\n Full name of a brick file.\n prefix : :class:`str`\n Optional part of filename immediately preceding the brickname.\n\n Returns\n -------\n :class:`str`\n Name of the brick in the file name.\n\n Raises\n ------\n ValueError\n If the filename does not appear to be a valid brick file.\n \"\"\"\n if not filename.endswith('.fits'):\n raise ValueError(\"Invalid galaxia mock brick file: {}!\".format(filename))\n #\n # Match filename tractor-0003p027.fits -> brickname 0003p027.\n # Also match tractor-00003p0027.fits, just in case.\n #\n match = re.search('%s_(\\d{4,5}[pm]\\d{3,4})\\.fits'%(prefix),\n os.path.basename(filename))\n\n if match is None:\n raise ValueError(\"Invalid galaxia mock brick file: {}!\".format(filename))\n return match.group(1)\n\n\ndef check_fitsio_version(version='0.9.8'):\n \"\"\"fitsio_ prior to 0.9.8rc1 has a bug parsing boolean columns.\n\n .. _fitsio: https://pypi.python.org/pypi/fitsio\n\n Parameters\n ----------\n version : :class:`str`, optional\n Default '0.9.8'. Having this parameter allows future-proofing and\n easier testing.\n\n Raises\n ------\n ImportError\n If the fitsio version is insufficiently recent.\n \"\"\"\n from distutils.version import LooseVersion\n #\n # LooseVersion doesn't handle rc1 as we want, so also check for 0.9.8xxx.\n #\n if (\n LooseVersion(fitsio.__version__) < LooseVersion(version) and\n not fitsio.__version__.startswith(version)\n ):\n raise ImportError(('ERROR: fitsio >{0}rc1 required ' +\n '(not {1})!').format(version, fitsio.__version__))\n\n\ndef whitespace_fits_read(filename, **kwargs):\n \"\"\"Use fitsio_ to read in a file and strip whitespace from all string columns.\n\n .. 
_fitsio: https://pypi.python.org/pypi/fitsio\n\n Parameters\n ----------\n filename : :class:`str`\n Name of the file to be read in by fitsio.\n kwargs: arguments that will be passed directly to fitsio.\n \"\"\"\n fitout = fitsio.read(filename, **kwargs)\n # ADM if the header=True option was passed then\n # ADM the output is the header and the data.\n data = fitout\n if 'header' in kwargs:\n data, header = fitout\n\n # ADM guard against the zero-th extension being read by fitsio.\n if data is not None:\n # ADM strip any whitespace from string columns.\n for colname in data.dtype.names:\n kind = data[colname].dtype.kind\n if kind == 'U' or kind == 'S':\n data[colname] = np.char.rstrip(data[colname])\n\n if 'header' in kwargs:\n return data, header\n\n return data\n\n\ndef load_pixweight(inmapfile, nside, pixmap=None):\n \"\"\"Loads a pixel map from file and resamples to a different HEALPixel resolution (nside)\n\n Parameters\n ----------\n inmapfile : :class:`str`\n Name of the file containing the pixel weight map.\n nside : :class:`int`\n After loading, the array will be resampled to this HEALPix nside.\n pixmap: `~numpy.array`, optional, defaults to None\n Pass a pixel map instead of loading it from file.\n\n Returns\n -------\n :class:`~numpy.array`\n HEALPixel weight map resampled to the requested nside.\n \"\"\"\n if pixmap is not None:\n log.debug('Using input pixel weight map of length {}.'.format(len(pixmap)))\n else:\n # ADM read in the pixel weights file.\n if not os.path.exists(inmapfile):\n log.fatal('Input directory does not exist: {}'.format(inmapfile))\n raise ValueError\n pixmap = fitsio.read(inmapfile)\n\n # ADM determine the file's nside, and flag a warning if the passed nside exceeds it.\n npix = len(pixmap)\n truenside = hp.npix2nside(len(pixmap))\n if truenside < nside:\n log.warning(\"downsampling is fuzzy...Passed nside={}, but file {} is stored at nside={}\"\n .format(nside, inmapfile, truenside))\n\n # ADM resample the map.\n return hp.pixelfunc.ud_grade(pixmap, nside, order_in='NESTED', order_out='NESTED')\n\n\ndef load_pixweight_recarray(inmapfile, nside, pixmap=None):\n \"\"\"Like load_pixweight but for a structured array map with multiple columns\n\n Parameters\n ----------\n inmapfile : :class:`str`\n Name of the file containing the pixel weight map.\n nside : :class:`int`\n After loading, the array will be resampled to this HEALPix nside.\n pixmap: `~numpy.array`, optional, defaults to None\n Pass a pixel map instead of loading it from file.\n\n Returns\n -------\n :class:`~numpy.array`\n HEALPixel weight map with all columns resampled to the requested nside.\n\n Notes\n -----\n - Assumes that tha passed map is in the NESTED scheme, and outputs to\n the NESTED scheme.\n - All columns are resampled as the mean of the relevant pixels, except\n if a column `HPXPIXEL` is passed. 
That column is reassigned the appropriate\n pixel number at the new nside.\n \"\"\"\n if pixmap is not None:\n log.debug('Using input pixel weight map of length {}.'.format(len(pixmap)))\n else:\n # ADM read in the pixel weights file.\n if not os.path.exists(inmapfile):\n log.fatal('Input directory does not exist: {}'.format(inmapfile))\n raise ValueError\n pixmap = fitsio.read(inmapfile)\n\n # ADM determine the file's nside, and flag a warning if the passed nside exceeds it.\n npix = len(pixmap)\n truenside = hp.npix2nside(len(pixmap))\n if truenside < nside:\n log.warning(\"downsampling is fuzzy...Passed nside={}, but file {} is stored at nside={}\"\n .format(nside, inmapfile, truenside))\n\n # ADM set up an output array.\n nrows = hp.nside2npix(nside)\n outdata = np.zeros(nrows, dtype=pixmap.dtype)\n\n # ADM resample the map for each column.\n for col in pixmap.dtype.names:\n outdata[col] = hp.pixelfunc.ud_grade(pixmap[col], nside, order_in='NESTED', order_out='NESTED')\n\n # ADM if one column was the HEALPixel number, recalculate for the new resolution.\n if 'HPXPIXEL' in pixmap.dtype.names:\n outdata[\"HPXPIXEL\"] = np.arange(nrows)\n\n return outdata\n\n\ndef gitversion():\n \"\"\"Returns `git describe --tags --dirty --always`,\n or 'unknown' if not a git repo\"\"\"\n import os\n from subprocess import Popen, PIPE, STDOUT\n origdir = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n try:\n p = Popen(['git', \"describe\", \"--tags\", \"--dirty\", \"--always\"], stdout=PIPE, stderr=STDOUT)\n except EnvironmentError:\n return 'unknown'\n\n os.chdir(origdir)\n out = p.communicate()[0]\n if p.returncode == 0:\n # - avoid py3 bytes and py3 unicode; get native str in both cases\n return str(out.rstrip().decode('ascii'))\n else:\n return 'unknown'\n\n\ndef read_external_file(filename, header=False, columns=[\"RA\", \"DEC\"]):\n \"\"\"Read FITS file with loose requirements on upper-case columns and EXTNAME.\n\n Parameters\n ----------\n filename : :class:`str`\n File name with full directory path included.\n header : :class:`bool`, optional, defaults to ``False``\n If ``True`` then return (data, header) instead of just data.\n columns: :class:`list`, optional, defaults to [\"RA\", \"DEC\"]\n Specify the desired columns to read.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n The output data array.\n :class:`~numpy.ndarray`, optional\n The output file header, if input `header` was ``True``.\n\n Notes\n -----\n - Intended to be used with externally supplied files such as locations\n to be matched for commissioning or secondary targets.\n \"\"\"\n # ADM check we aren't going to have an epic fail on the the version of fitsio.\n check_fitsio_version()\n\n # ADM prepare to read in the data by reading in columns.\n fx = fitsio.FITS(filename, upper=True)\n fxcolnames = fx[1].get_colnames()\n hdr = fx[1].read_header()\n\n # ADM convert the columns to upper case...\n colnames = [colname.upper() for colname in fxcolnames]\n # ADM ...and fail if RA and DEC aren't columns.\n if not (\"RA\" in colnames and \"DEC\" in colnames):\n msg = 'Input file {} must contain both \"RA\" and \"DEC\" columns' \\\n .format(filename)\n log.critical(msg)\n raise ValueError(msg)\n\n # ADM read in the RA/DEC columns.\n outdata = fx[1].read(columns=[\"RA\", \"DEC\"])\n\n # ADM return data read in from file, with the header if requested.\n fx.close()\n if header:\n return outdata, hdr\n else:\n return outdata\n\n\ndef decode_sweep_name(sweepname, nside=None, inclusive=True, fact=4):\n \"\"\"Retrieve RA/Dec edges from a 
full directory path to a sweep file\n\n Parameters\n ----------\n sweepname : :class:`str`\n Full path to a sweep file, e.g., /a/b/c/sweep-350m005-360p005.fits\n nside : :class:`int`, optional, defaults to None\n (NESTED) HEALPixel nside\n inclusive : :class:`book`, optional, defaults to ``True``\n see documentation for `healpy.query_polygon()`\n fact : :class:`int`, optional defaults to 4\n see documentation for `healpy.query_polygon()`\n\n Returns\n -------\n :class:`list` (if nside is None)\n A 4-entry list of the edges of the region covered by the sweeps file\n in the form [RAmin, RAmax, DECmin, DECmax]\n For the above example this would be [350., 360., -5., 5.]\n :class:`list` (if nside is not None)\n A list of HEALPixels that touch the files at the passed `nside`\n For the above example this would be [16, 17, 18, 19]\n \"\"\"\n # ADM extract just the file part of the name.\n sweepname = os.path.basename(sweepname)\n\n # ADM the RA/Dec edges.\n ramin, ramax = float(sweepname[6:9]), float(sweepname[14:17])\n decmin, decmax = float(sweepname[10:13]), float(sweepname[18:21])\n\n # ADM flip the signs on the DECs, if needed.\n if sweepname[9] == 'm':\n decmin *= -1\n if sweepname[17] == 'm':\n decmax *= -1\n\n if nside is None:\n return [ramin, ramax, decmin, decmax]\n\n pixnum = hp_in_box(nside, [ramin, ramax, decmin, decmax],\n inclusive=inclusive, fact=fact)\n\n return pixnum\n\n\ndef check_hp_target_dir(hpdirname):\n \"\"\"Check fidelity of a directory of HEALPixel-partitioned targets.\n\n Parameters\n ----------\n hpdirname : :class:`str`\n Full path to a directory containing targets that have been\n split by HEALPixel.\n\n Returns\n -------\n :class:`int`\n The HEALPixel NSIDE for the files in the passed directory.\n :class:`dict`\n A dictionary where the keys are each HEALPixel covered in the\n passed directory and the values are the file that includes\n that HEALPixel.\n\n Notes\n -----\n - Checks that all files are at the same NSIDE.\n - Checks that no two files contain the same HEALPixels.\n - Checks that HEALPixel numbers are consistent with NSIDE.\n \"\"\"\n # ADM glob all the files in the directory, read the pixel\n # ADM numbers and NSIDEs.\n nside = []\n pixlist = []\n fns = glob(os.path.join(hpdirname, \"*fits\"))\n pixdict = {}\n for fn in fns:\n hdr = fitsio.read_header(fn, \"TARGETS\")\n nside.append(hdr[\"FILENSID\"])\n pixels = hdr[\"FILEHPX\"]\n # ADM create a look-up dictionary of file-for-each-pixel.\n for pix in pixels:\n pixdict[pix] = fn\n pixlist.append(pixels)\n nside = np.array(nside)\n # ADM as well as having just an array of all the pixels.\n pixlist = np.hstack(pixlist)\n\n msg = None\n # ADM check all NSIDEs are the same.\n if not np.all(nside == nside[0]):\n msg = 'Not all files in {} are at the same NSIDE' \\\n .format(hpdirname)\n\n # ADM check that no two files contain the same HEALPixels.\n if not len(set(pixlist)) == len(pixlist):\n dup = set([pix for pix in pixlist if list(pixlist).count(pix) > 1])\n msg = 'Duplicate pixel ({}) in files in {}' \\\n .format(dup, hpdirname)\n\n # ADM check that the pixels are consistent with the nside.\n goodpix = np.arange(hp.nside2npix(nside[0]))\n badpix = set(pixlist) - set(goodpix)\n if len(badpix) > 0:\n msg = 'Pixel ({}) not allowed at NSIDE={} in {}'. 
\\\n format(badpix, nside[0], hpdirname)\n\n if msg is not None:\n log.critical(msg)\n raise AssertionError(msg)\n\n return nside[0], pixdict\n\n\ndef read_targets_in_hp(hpdirname, nside, pixlist, columns=None):\n \"\"\"Read in targets in a set of HEALPixels.\n\n Parameters\n ----------\n hpdirname : :class:`str`\n Full path to either a directory containing targets that\n have been partitioned by HEALPixel (i.e. as made by\n `select_targets` with the `bundle_files` option). Or the\n name of a single file of targets.\n nside : :class:`int`\n The (NESTED) HEALPixel nside.\n pixlist : :class:`list` or `int` or `~numpy.ndarray`\n Return targets in these HEALPixels at the passed `nside`.\n columns : :class:`list`, optional\n Only read in these target columns.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n An array of targets in the passed pixels.\n \"\"\"\n # ADM we'll need RA/Dec for final cuts, so ensure they're read.\n addedcols = []\n columnscopy = None\n if columns is not None:\n # ADM make a copy of columns, as it's a kwarg we'll modify.\n columnscopy = columns.copy()\n for radec in [\"RA\", \"DEC\"]:\n if radec not in columnscopy:\n columnscopy.append(radec)\n addedcols.append(radec)\n\n # ADM if a directory was passed, do fancy HEALPixel parsing...\n if os.path.isdir(hpdirname):\n # ADM check, and grab information from, the target directory.\n filenside, filedict = check_hp_target_dir(hpdirname)\n\n # ADM change the passed pixels to the nside of the file schema.\n filepixlist = nside2nside(nside, filenside, pixlist)\n\n # ADM only consider pixels for which we have a file.\n isindict = [pix in filedict for pix in filepixlist]\n filepixlist = filepixlist[isindict]\n\n # ADM make sure each file is only read once.\n infiles = set([filedict[pix] for pix in filepixlist])\n\n # ADM read in the files and concatenate the resulting targets.\n targets = []\n for infile in infiles:\n targets.append(fitsio.read(infile, columns=columnscopy))\n targets = np.concatenate(targets)\n # ADM ...otherwise just read in the targets.\n else:\n targets = fitsio.read(hpdirname, columns=columnscopy)\n\n # ADM restrict the targets to the actual requested HEALPixels...\n ii = is_in_hp(targets, nside, pixlist)\n # ADM ...and remove RA/Dec columns if we added them.\n targets = rfn.drop_fields(targets[ii], addedcols)\n\n return targets\n\n\ndef read_targets_in_box(hpdirname, radecbox=[0., 360., -90., 90.],\n columns=None):\n \"\"\"Read in targets in an RA/Dec box.\n\n Parameters\n ----------\n hpdirname : :class:`str`\n Full path to either a directory containing targets that\n have been partitioned by HEALPixel (i.e. as made by\n `select_targets` with the `bundle_files` option). 
Or the\n name of a single file of targets.\n radecbox : :class:`list`, defaults to the entire sky\n 4-entry list of coordinates [ramin, ramax, decmin, decmax]\n forming the edges of a box in RA/Dec (degrees).\n columns : :class:`list`, optional\n Only read in these target columns.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n An array of targets in the passed RA/Dec box.\n \"\"\"\n # ADM we'll need RA/Dec for final cuts, so ensure they're read.\n addedcols = []\n columnscopy = None\n if columns is not None:\n # ADM make a copy of columns, as it's a kwarg we'll modify.\n columnscopy = columns.copy()\n for radec in [\"RA\", \"DEC\"]:\n if radec not in columnscopy:\n columnscopy.append(radec)\n addedcols.append(radec)\n\n # ADM if a directory was passed, do fancy HEALPixel parsing...\n if os.path.isdir(hpdirname):\n # ADM approximate nside for area of passed box.\n nside = pixarea2nside(box_area(radecbox))\n\n # ADM HEALPixels that touch the box for that nside.\n pixlist = hp_in_box(nside, radecbox)\n\n # ADM read in targets in these HEALPixels.\n targets = read_targets_in_hp(hpdirname, nside, pixlist,\n columns=columnscopy)\n # ADM ...otherwise just read in the targets.\n else:\n targets = fitsio.read(hpdirname, columns=columnscopy)\n\n # ADM restrict only to targets in the requested RA/Dec box...\n ii = is_in_box(targets, radecbox)\n # ADM ...and remove RA/Dec columns if we added them.\n targets = rfn.drop_fields(targets[ii], addedcols)\n\n return targets\n\n\ndef read_targets_in_cap(hpdirname, radecrad, columns=None):\n \"\"\"Read in targets in an RA, Dec, radius cap.\n\n Parameters\n ----------\n hpdirname : :class:`str`\n Full path to either a directory containing targets that\n have been partitioned by HEALPixel (i.e. as made by\n `select_targets` with the `bundle_files` option). Or the\n name of a single file of targets.\n radecrad : :class:`list`\n 3-entry list of coordinates [ra, dec, radius] forming a cap or\n \"circle\" on the sky. 
ra, dec and radius are all in degrees.\n columns : :class:`list`, optional\n Only read in these target columns.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n An array of targets in the passed RA/Dec box.\n \"\"\"\n # ADM we'll need RA/Dec for final cuts, so ensure they're read.\n addedcols = []\n columnscopy = None\n if columns is not None:\n # ADM make a copy of columns, as it's a kwarg we'll modify.\n columnscopy = columns.copy()\n for radec in [\"RA\", \"DEC\"]:\n if radec not in columnscopy:\n columnscopy.append(radec)\n addedcols.append(radec)\n\n # ADM if a directory was passed, do fancy HEALPixel parsing...\n if os.path.isdir(hpdirname):\n # ADM approximate nside for area of passed cap.\n nside = pixarea2nside(cap_area(np.array(radecrad[2])))\n\n # ADM HEALPixels that touch the cap for that nside.\n pixlist = hp_in_cap(nside, radecrad)\n\n # ADM read in targets in these HEALPixels.\n targets = read_targets_in_hp(hpdirname, nside, pixlist,\n columns=columnscopy)\n # ADM ...otherwise just read in the targets.\n else:\n targets = fitsio.read(hpdirname, columns=columnscopy)\n\n # ADM restrict only to targets in the requested cap...\n ii = is_in_cap(targets, radecrad)\n # ADM ...and remove RA/Dec columns if we added them.\n targets = rfn.drop_fields(targets[ii], addedcols)\n\n return targets\n", "sub_path": "py/desitarget/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 50879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "desiutil.log.get_logger", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 145, "usage_type": "call"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 194, "usage_type": "name"}, {"api_name": "desitarget.gaiamatch.pop_gaia_coords", "line_number": 194, "usage_type": "call"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel.dtype", "line_number": 197, "usage_type": "attribute"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 197, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 283, "usage_type": "call"}, {"api_name": "fitsio.FITS", "line_number": 318, "usage_type": "call"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 354, "usage_type": "name"}, {"api_name": "desitarget.gaiamatch.pop_gaia_coords", "line_number": 354, "usage_type": "call"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 357, "usage_type": "name"}, {"api_name": "desitarget.gaiamatch.pop_gaia_columns", "line_number": 357, "usage_type": "call"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 358, "usage_type": "argument"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel.dtype", "line_number": 362, "usage_type": "attribute"}, {"api_name": "desitarget.gaiamatch.gaiadatamodel", "line_number": 362, "usage_type": "name"}, {"api_name": "numpy.dtype", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.char.rstrip", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 395, "usage_type": "attribute"}, 
{"api_name": "numpy.dtype", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 505, "usage_type": "call"}, {"api_name": "fitsio.FITSHDR", "line_number": 509, "usage_type": "call"}, {"api_name": "desiutil.depend.setdep", "line_number": 510, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 510, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 511, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 511, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 512, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 512, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 513, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 513, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 516, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 516, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 520, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 520, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 522, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 522, "usage_type": "name"}, {"api_name": "numpy.radians", "line_number": 526, "usage_type": "call"}, {"api_name": "healpy.ang2pix", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.append_fields", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 528, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 534, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 535, "usage_type": "attribute"}, {"api_name": "fitsio.write", "line_number": 552, "usage_type": "call"}, {"api_name": "fitsio.FITSHDR", "line_number": 585, "usage_type": "call"}, {"api_name": "desiutil.depend.setdep", "line_number": 586, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 586, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 587, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 587, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 590, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 590, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 595, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 595, "usage_type": "name"}, {"api_name": "numpy.radians", "line_number": 608, "usage_type": "call"}, {"api_name": "healpy.ang2pix", "line_number": 609, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.append_fields", "line_number": 610, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 610, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 616, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 617, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 617, "usage_type": "attribute"}, {"api_name": "fitsio.write", "line_number": 619, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.rename_fields", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 645, "usage_type": "name"}, {"api_name": "fitsio.FITSHDR", "line_number": 648, "usage_type": "call"}, {"api_name": "desiutil.depend.setdep", "line_number": 649, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 649, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 650, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 650, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 653, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 653, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 658, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 658, "usage_type": "name"}, {"api_name": "numpy.radians", "line_number": 662, "usage_type": "call"}, {"api_name": "healpy.ang2pix", "line_number": 663, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.append_fields", "line_number": 664, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 664, "usage_type": "name"}, {"api_name": "fitsio.write", "line_number": 678, "usage_type": "call"}, {"api_name": "fitsio.FITSHDR", "line_number": 705, "usage_type": "call"}, {"api_name": "desiutil.depend.setdep", "line_number": 706, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 706, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 707, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 707, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 710, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 710, "usage_type": "name"}, {"api_name": "desiutil.depend.setdep", "line_number": 715, "usage_type": "call"}, {"api_name": "desiutil.depend", "line_number": 715, "usage_type": "name"}, {"api_name": "glob.iglob", "line_number": 719, "usage_type": "call"}, {"api_name": "fitsio.read_header", "line_number": 724, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 736, "usage_type": "call"}, {"api_name": "healpy.ang2pix", "line_number": 737, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.append_fields", "line_number": 738, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 738, "usage_type": "name"}, {"api_name": "fitsio.write", "line_number": 746, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 753, "usage_type": "call"}, {"api_name": "os.path", "line_number": 753, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 754, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 757, "usage_type": "call"}, {"api_name": "os.path", "line_number": 757, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 759, "usage_type": "call"}, {"api_name": "os.path", "line_number": 759, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 769, "usage_type": "call"}, {"api_name": "os.path", "line_number": 769, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 787, "usage_type": "call"}, {"api_name": "os.path", "line_number": 787, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 839, "usage_type": "call"}, 
{"api_name": "os.path.basename", "line_number": 840, "usage_type": "call"}, {"api_name": "os.path", "line_number": 840, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 873, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 874, "usage_type": "call"}, {"api_name": "os.path", "line_number": 874, "usage_type": "attribute"}, {"api_name": "distutils.version.LooseVersion", "line_number": 902, "usage_type": "call"}, {"api_name": "fitsio.__version__", "line_number": 902, "usage_type": "attribute"}, {"api_name": "fitsio.__version__.startswith", "line_number": 903, "usage_type": "call"}, {"api_name": "fitsio.__version__", "line_number": 903, "usage_type": "attribute"}, {"api_name": "fitsio.__version__", "line_number": 906, "usage_type": "attribute"}, {"api_name": "fitsio.read", "line_number": 920, "usage_type": "call"}, {"api_name": "numpy.char.rstrip", "line_number": 933, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 933, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 962, "usage_type": "call"}, {"api_name": "os.path", "line_number": 962, "usage_type": "attribute"}, {"api_name": "fitsio.read", "line_number": 965, "usage_type": "call"}, {"api_name": "healpy.npix2nside", "line_number": 969, "usage_type": "call"}, {"api_name": "healpy.pixelfunc.ud_grade", "line_number": 975, "usage_type": "call"}, {"api_name": "healpy.pixelfunc", "line_number": 975, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 1007, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "fitsio.read", "line_number": 1010, "usage_type": "call"}, {"api_name": "healpy.npix2nside", "line_number": 1014, "usage_type": "call"}, {"api_name": "healpy.nside2npix", "line_number": 1020, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1021, "usage_type": "call"}, {"api_name": "healpy.pixelfunc.ud_grade", "line_number": 1025, "usage_type": "call"}, {"api_name": "healpy.pixelfunc", "line_number": 1025, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 1029, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 1039, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 1040, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 1040, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 1042, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 1042, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 1042, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 1046, "usage_type": "call"}, {"api_name": "fitsio.FITS", "line_number": 1083, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 1132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1132, "usage_type": "attribute"}, {"api_name": "desitarget.geomask.hp_in_box", "line_number": 1147, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 1181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1181, "usage_type": "attribute"}, {"api_name": "fitsio.read_header", "line_number": 1184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1191, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 1193, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 1197, 
"usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1208, "usage_type": "call"}, {"api_name": "healpy.nside2npix", "line_number": 1208, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1255, "usage_type": "attribute"}, {"api_name": "desitarget.geomask.nside2nside", "line_number": 1260, "usage_type": "call"}, {"api_name": "fitsio.read", "line_number": 1272, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 1273, "usage_type": "call"}, {"api_name": "fitsio.read", "line_number": 1276, "usage_type": "call"}, {"api_name": "desitarget.geomask.is_in_hp", "line_number": 1279, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.drop_fields", "line_number": 1281, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 1281, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 1320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1320, "usage_type": "attribute"}, {"api_name": "desitarget.geomask.pixarea2nside", "line_number": 1322, "usage_type": "call"}, {"api_name": "desitarget.geomask.box_area", "line_number": 1322, "usage_type": "call"}, {"api_name": "desitarget.geomask.hp_in_box", "line_number": 1325, "usage_type": "call"}, {"api_name": "fitsio.read", "line_number": 1332, "usage_type": "call"}, {"api_name": "desitarget.geomask.is_in_box", "line_number": 1335, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.drop_fields", "line_number": 1337, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 1337, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 1375, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1375, "usage_type": "attribute"}, {"api_name": "desitarget.geomask.pixarea2nside", "line_number": 1377, "usage_type": "call"}, {"api_name": "desitarget.geomask.cap_area", "line_number": 1377, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 1377, "usage_type": "call"}, {"api_name": "desitarget.geomask.hp_in_cap", "line_number": 1380, "usage_type": "call"}, {"api_name": "fitsio.read", "line_number": 1387, "usage_type": "call"}, {"api_name": "desitarget.geomask.is_in_cap", "line_number": 1390, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions.drop_fields", "line_number": 1392, "usage_type": "call"}, {"api_name": "numpy.lib.recfunctions", "line_number": 1392, "usage_type": "name"}]} +{"seq_id": "640086278", "text": "import pyttsx3\r\n\r\n#get text from user\r\ntext = input('What would you like your computer to say?\\n')\r\n#initialize engine and speak\r\ndef textToSpeech(text):\r\n engine = pyttsx3.init()\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\ntextToSpeech(text)", "sub_path": "text-to-speech.py", "file_name": "text-to-speech.py", "file_ext": "py", "file_size_in_byte": 251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyttsx3.init", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "242109505", "text": "from tkinter import Tk,Label,Button,Entry,Frame,filedialog,messagebox,Checkbutton,StringVar,BooleanVar,IntVar\nimport tkinter as tk\nfrom MouseArduino import MouseArduino\nfrom Decision import Decision\nimport json\nimport sys\nimport threading\nimport Utilities\nimport os\nimport time\nimport Sound\n\nLARGE_FONT = (\"Verdana\", 11)\nBACKGROUND_COLOR = '#e1d5ce'\nHEADER_COLOR = '#e0a989'\nLABEL_COLOR = '#2cc7a0'\nTEXT_FONT = 'helvetica 9 
bold'\nLICK_OFF_COLOR= '#6b6b6b'\nLICK_FONT = 'Verdana 40 bold'\nON_COLOR = 'green'\nOFF_COLOR = 'red'\nSTATUS_FONT = 'Verdana 15 bold'\nclass DecisionGui:\n def __init__(self,root=Tk()):\n self.root = root\n self.root.title(\"Decision Experiment\")\n self.root.configure(bg=BACKGROUND_COLOR)\n self.titleLabel = Label(self.root,text='Decision Experiment',font=STATUS_FONT,bg=BACKGROUND_COLOR)\n self.ard = None\n self.experiment = None\n self.isConfigLoaded = False\n self.isArdConnected = False\n self.isPumpOn = False\n self.estTime = StringVar()\n self.lickCount = IntVar()\n self.lickCount.set(0)\n self.isSoundOn = BooleanVar()\n self.stopUpdating = threading.Event()\n self.ardUpdater = threading.Thread(target=self.updateVariable)\n port = MouseArduino.getUnoPort()\n\n\n #Frames\n self.master = Frame(root,bg=BACKGROUND_COLOR)\n self.master.grid_rowconfigure(0)\n self.master.grid_rowconfigure(1)\n self.master.grid_rowconfigure(2, weight=5)\n self.master.grid_columnconfigure(0, weight=1)\n self.master.grid_columnconfigure(1, weight=1)\n self.master.grid_columnconfigure(2, weight=1)\n self.ardInitFrame = Frame(self.master,bd=3,relief='groove',bg=BACKGROUND_COLOR)\n self.ardControlFrame = Frame(self.master,bd=3,relief='groove',bg=BACKGROUND_COLOR)\n self.initFrame = Frame(self.master,bd=3,relief='groove',bg=BACKGROUND_COLOR)\n self.argFrame = Frame(self.master,bd=3,relief='groove',bg=BACKGROUND_COLOR)\n self.finalControlFrame = Frame(self.master,bd=3,relief='groove',bg=BACKGROUND_COLOR)\n\n\n #ardInitFrame\n self.ardInitFrameLabel = Label(self.ardInitFrame,text=\"Connect to Hardware\",bg=HEADER_COLOR,font=LARGE_FONT,fg='black',borderwidth=2,width=40)\n self.comLabel = Label(self.ardInitFrame,bg=BACKGROUND_COLOR,text=\"Com Port:\",font=TEXT_FONT)\n self.comEntry = Entry(self.ardInitFrame,font=TEXT_FONT)\n self.baudrateLabel = Label(self.ardInitFrame,bg=BACKGROUND_COLOR,text=\"Baudrate:\",font=TEXT_FONT)\n self.baudrateEntry = Entry(self.ardInitFrame,font=TEXT_FONT)\n self.connectButton = Button(self.ardInitFrame,text=\"Connect\",font=STATUS_FONT,command=self.connect)\n self.ardInitFrameLabel.grid(row=0,columnspan=2,padx=40,pady=10)\n self.comLabel.grid(row=1,column=0,sticky=tk.E)\n self.comEntry.grid(row=1,column=1,sticky=tk.W)\n self.baudrateLabel.grid(row=2,column=0,sticky=tk.E)\n self.baudrateEntry.grid(row=2,column=1,sticky=tk.W)\n self.connectButton.grid(row=3,columnspan=2,pady=10)\n self.comEntry.insert(0,port)\n self.baudrateEntry.insert(0,115200)\n \n #ardControlFrame\n self.ardControlFrameLabel = Label(self.ardControlFrame,text='Pre-experiment Control',bg=HEADER_COLOR,font=LARGE_FONT,fg='black',borderwidth=2,width=40)\n self.sendStringEntry = Entry(self.ardControlFrame,font=TEXT_FONT,width=20)\n self.sendStringButton = Button(self.ardControlFrame,text='Send String',font=TEXT_FONT,bg=BACKGROUND_COLOR,command=self.sendString)\n self.rewardButton = Button(self.ardControlFrame,text='Reward(R)',font=STATUS_FONT,width=10,bg=BACKGROUND_COLOR,command=self.deliverReward,height=1)\n self.pumpButton = Button(self.ardControlFrame,text='Pump Water(P)',font=STATUS_FONT,command=self.togglePump,bg=OFF_COLOR,width=12,height=1)\n self.lickLabel = Label(self.ardControlFrame,text='LICK',bg=LICK_OFF_COLOR,font=LICK_FONT,width=10,height=1)\n self.lickCountLabel = Label(self.ardControlFrame,text='Lick Count :',bg=BACKGROUND_COLOR,font=LARGE_FONT)\n self.lickCountButton = Button(self.ardControlFrame,textvariable=self.lickCount,font=LARGE_FONT,bg=BACKGROUND_COLOR,command=lambda : self.lickCount.set(0))\n 
self.soundCheckButton = Checkbutton(self.ardControlFrame,text='Lick Sound',variable=self.isSoundOn,bg=BACKGROUND_COLOR)\n\n self.ardControlFrameLabel.grid(row=0,columnspan=2,padx=40,pady=10)\n self.sendStringEntry.bind('',self.sendString)\n self.sendStringEntry.grid(row=1,column=0,padx=5,sticky=tk.E)\n self.sendStringEntry.bind('',lambda x: self.master.focus())\n self.sendStringButton.grid(row=1,column=1,padx=5,sticky=tk.W)\n self.rewardButton.grid(row=2,column=0,pady=10)\n self.pumpButton.grid(row=2,column=1,pady=10)\n self.lickLabel.grid(row=3,columnspan=2,pady=15)\n self.lickCountLabel.grid(row=4,column=0,sticky=tk.E)\n self.lickCountButton.grid(row=4,column=1,sticky=tk.W)\n self.soundCheckButton.grid(row=5,columnspan=2)\n\n #initFrame\n self.initFrameLabel = Label(self.initFrame,text=\"Session Configuration\",font=LARGE_FONT,bg=HEADER_COLOR,fg='black',borderwidth=2,width=40)\n self.loadButton = Button(self.initFrame,text=\"Load Config(L)\",font=STATUS_FONT,command=self.selectFile)\n self.sessionNameLabel = Label(self.initFrame,text=\"Session Name:\",font=TEXT_FONT,bg=BACKGROUND_COLOR)\n self.sessionNameEntry = Entry(self.initFrame,font=TEXT_FONT)\n self.numOfTrialsLabel = Label(self.initFrame,text=\"Number of Trials:\",font=TEXT_FONT,bg=BACKGROUND_COLOR)\n self.numOfTrialsEntry = Entry(self.initFrame,font=TEXT_FONT)\n self.numOfTrialsEntry.bind('',self.updateTime)\n self.numOfTrialsEntry.bind('',lambda x: self.master.focus())\n self.initFrameLabel.grid(row=0,columnspan=2,padx=40,pady=10)\n self.sessionNameLabel.grid(row=1,column=0,sticky=tk.E)\n self.sessionNameEntry.grid(row=1,column=1,sticky=tk.W)\n self.sessionNameEntry.bind('',lambda x: self.master.focus())\n self.numOfTrialsLabel.grid(row=2,column=0,sticky=tk.E)\n self.numOfTrialsEntry.grid(row=2,column=1,sticky=tk.W)\n self.loadButton.grid(row=3,columnspan=2,pady=10)\n\n #finalControlFrame\n self.finalControlFrameLabel = Label(self.finalControlFrame,text='Experiment Control',bg=HEADER_COLOR,font=LARGE_FONT,fg='black',bd=2,width=40)\n self.estTimeLabel = Label(self.finalControlFrame,textvariable=self.estTime,font=STATUS_FONT,bg=BACKGROUND_COLOR)\n self.startButton = Button(self.finalControlFrame,text=\"START EXPERIMENT\",font='Helvetica 20 bold',command=self.startExperiment)\n self.finalControlFrameLabel.grid(padx=40,pady=10)\n self.estTimeLabel.grid(pady=10)\n self.startButton.grid(pady=15)\n\n\n\n #master\n self.titleLabel.pack(pady = 5)\n self.master.pack(padx=20,pady=20)\n self.initFrame.grid(row=0,column=0)\n self.ardInitFrame.grid(row=1,column=0)\n self.finalControlFrame.grid(row=2,column=0,sticky='NSWE')\n self.argFrame.grid(row=0,column=1,rowspan=3,sticky='NSWE')\n for frame in [self.master,self.initFrame,self.ardInitFrame,self.finalControlFrame,self.argFrame]:\n frame.bind('r',self.deliverReward)\n frame.bind('p',self.togglePump)\n frame.bind('l',self.selectFile)\n frame.bind('R',self.deliverReward)\n frame.bind('P',self.togglePump)\n frame.bind('L',self.selectFile)\n frame.bind(\"\",lambda e: self.master.focus_set())\n self.updateTime()\n\n\n\n\n\n def run(self):\n self.master.mainloop()\n \n def selectFile(self,event=None):\n fileName = filedialog.askopenfilename() \n self.configFileName = fileName\n for widget in self.argFrame.winfo_children():\n widget.destroy()\n self.argFrameLabel = Label(self.argFrame,text=\"Experiment Configuration: \"+os.path.basename(fileName),font=LARGE_FONT,bg=HEADER_COLOR,fg='black',bd=2,width=40).grid(columnspan=2,padx=40,pady=10)\n try:\n with open(fileName) as f:\n self.args = 
json.load(f)\n except Exception as e:\n print(e)\n argToLen = lambda x: len(str(x))\n maxArgNameLength = argToLen(max(self.args.keys(),key=lambda x: argToLen(x)))\n maxArgValueLength = argToLen(max(self.args.values(),key=lambda x: argToLen(x)))\n self.trialDuration = self.args[\"Rule duration\"] +\\\n self.args[\"Delay duration\"] +\\\n self.args[\"Stimulus duration\"] + \\\n self.args[\"Wrong response flash duration\"] + \\\n self.args[\"Wrong response rest duration\"] \n for i,(argName,value) in enumerate(sorted(self.args.items(),key=lambda item: item[0])):\n lName = Label(self.argFrame,text=str(argName)+ \" :\",font='Helvetica 12 bold',bg=BACKGROUND_COLOR).grid(row=i+3,column=0,sticky=tk.E)\n lValue = Label(self.argFrame,text=str(value),bg=BACKGROUND_COLOR).grid(row=i+3,column=1,sticky=tk.W,)\n self.updateTime()\n self.isConfigLoaded = True\n\n def connect(self):\n try:\n comport = self.comEntry.get()\n baudrate = self.baudrateEntry.get()\n if comport == \"\" or baudrate == \"\":\n raise Exception(\"Please fill in all values\")\n baudrate = int(baudrate)\n self.ard = MouseArduino(comport,baudrate)\n self.ard.start()\n self.ardInitFrame.destroy()\n self.ardUpdater.start()\n self.ardControlFrame.grid(row=1,column=0)\n self.isArdConnected=True\n except Exception as e:\n messagebox.showerror(\"Error\",\"Could not connect to Arduino. Make sure port is correct or other program isn't grabbing the port :\"+str(e))\n \n\n def deliverReward(self,event=None):\n self.ard.deliverReward()\n def sendString(self,event=None):\n self.ard.write(self.sendStringEntry.get())\n self.sendStringEntry.delete(0,'end')\n\n def updateVariable(self):\n while not self.stopUpdating.is_set():\n if self.ard.newMsg.wait(1):\n while not self.ard.msgQueue.empty():\n self.ard.newMsg.clear()\n msg = self.ard.msgQueue.get()\n print(msg)\n args = Utilities.parse(msg)\n arg = args[1].strip()\n if arg == 'LK':\n self.lickCount.set(self.lickCount.get() + 1)\n self.lickLabel.configure(bg=ON_COLOR)\n if self.isSoundOn.get():\n Sound.cue(0.05)\n time.sleep(0.2)\n self.lickLabel.configure(bg=LICK_OFF_COLOR)\n elif arg == 'startpump':\n self.pumpButton.configure(bg=ON_COLOR)\n self.isPumpOn = True\n elif arg == 'stoppump':\n self.pumpButton.configure(bg=OFF_COLOR)\n self.isPumpOn = False\n\n def togglePump(self,event=None):\n if self.isPumpOn:\n self.ard.stopPump()\n else:\n self.ard.startPump()\n\n def updateTime(self,event=None):\n numOfTrials = self.numOfTrialsEntry.get()\n try:\n totalDuration = self.trialDuration * int(numOfTrials)\n tmin = totalDuration // 60\n tsec = totalDuration % 60\n timeStr = \"{:.0f} Min {:.0f} Sec\".format(tmin,tsec)\n except Exception as e:\n timeStr = \"\"\n print(e)\n self.estTime.set(\"Estimated duration: {:>10}\".format(timeStr))\n \n def startExperiment(self):\n if not self.isConfigLoaded:\n messagebox.showerror(\"Error\",\"Please load configuration file\")\n elif not self.isArdConnected:\n messagebox.showerror(\"Error\",\"Please connect Arduino\")\n else:\n try:\n sessionName = self.sessionNameEntry.get()\n numOfTrials = self.numOfTrialsEntry.get()\n if sessionName == \"\" or numOfTrials == \"\":\n raise Exception(\"Please fill in all values\")\n numOfTrials = int(numOfTrials)\n self.experiment = Decision(self.ard)\n self.experiment.startExperiment(sessionName,numOfTrials,self.configFileName)\n except Exception as e:\n messagebox.showerror(\"Error\",e)\n\nif __name__ == '__main__':\n try:\n gui = DecisionGui()\n gui.run()\n if gui.ardUpdater.is_alive():\n gui.stopUpdating.set()\n if gui.ard:\n 
gui.ard.stop()\n except Exception as e:\n print(e)\n input(\"Press Enter to continue\")\n\n\n", "sub_path": "python/DecisionGui.py", "file_name": "DecisionGui.py", "file_ext": "py", "file_size_in_byte": 12974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tkinter.Tk", "line_number": 24, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 34, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.BooleanVar", "line_number": 37, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 38, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 39, "usage_type": "call"}, {"api_name": "MouseArduino.MouseArduino.getUnoPort", "line_number": 40, "usage_type": "call"}, {"api_name": "MouseArduino.MouseArduino", "line_number": 40, "usage_type": "name"}, {"api_name": "tkinter.Frame", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 51, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 52, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 54, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 55, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 61, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 75, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 76, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 78, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 79, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 81, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.Checkbutton", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 98, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 99, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 100, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 101, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 102, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 103, "usage_type": 
"call"}, {"api_name": "tkinter.E", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tkinter.W", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 115, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 149, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 149, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 156, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 168, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 169, "usage_type": "attribute"}, {"api_name": "MouseArduino.MouseArduino", "line_number": 180, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 187, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 187, "usage_type": "name"}, {"api_name": "Utilities.parse", "line_number": 203, "usage_type": "call"}, {"api_name": "Sound.cue", "line_number": 209, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 210, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 239, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 239, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 241, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 241, "usage_type": "name"}, {"api_name": "Decision.Decision", "line_number": 249, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 252, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 252, "usage_type": "name"}]} +{"seq_id": "307656695", "text": "import airflow\nfrom datetime import datetime\n\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.hive_operator import HiveOperator\n\n\nfrom airflow.contrib.operators.ssh_operator import SSHOperator\nfrom airflow.contrib.hooks.ssh_hook import SSHHook\n\nargs = {\n 'owner': 'vit',\n # when set to True, keeps a task from getting triggered if the previous schedule for the task failed\n 'depends_on_past': True,\n # Dict of global variables to be used within DAG\n 'provide_context': True,\n #\n 'start_date': datetime.now(),\n 'end_date': datetime(2050, 1, 1)\n}\n\n\ndef dummy_python_operator(ds, **kwards):\n return 'Hello from pythonic world'\n\n\nwith airflow.DAG(\n dag_id='panjiva',\n schedule_interval=\"@once\",\n default_args=args\n) as dag:\n ############################################################\n # Service dummy operators\n starter = DummyOperator(\n task_id='launcher',\n retries=3,\n dag=dag\n )\n\n finisher = DummyOperator(\n task_id='finisher',\n trigger_rule='all_success',\n dag=dag\n )\n\n wait_unzipping = DummyOperator(\n task_id='local-achieves-barrier',\n 
trigger_rule='all_success',\n dag=dag\n )\n\n wait_uploading = DummyOperator(\n task_id='hive-uploading-barrier',\n trigger_rule='all_success',\n dag=dag\n )\n\n export_barrier = DummyOperator(\n task_id='export-downloading-barrier',\n trigger_rule='all_success',\n dag=dag\n )\n\n import_barrier = DummyOperator(\n task_id='import-downloading-barrier',\n trigger_rule='all_success',\n dag=dag\n )\n\n sql_barrier = DummyOperator(\n task_id='construct-historical-tables-barrier',\n trigger_rule='all_success',\n dag=dag\n )\n\n aggregated_stats_barrier = DummyOperator(\n task_id='construct-aggregated-tables-barrier',\n trigger_rule='all_success',\n dag=dag\n )\n\n hadoop_hook = SSHHook(\n remote_host='10.1.25.37',\n username='kashchenko',\n password='pwd',\n timeout=30\n )\n ############################################################\n '''\n # Use bash scripts if there is an interconnect with Hadoop/HDFS\n download_import_data = BashOperator(\n task_id='download-panjiva-import-data',\n bash_command='${AIRFLOW_HOME}/dags/downloader-scripts/download-import-data.sh ',\n dag=dag\n )\n \n download_export_data = BashOperator(\n task_id='download-panjiva-export-data',\n bash_command='${AIRFLOW_HOME}/dags/downloader-scripts/download-export-data.sh ',\n dag=dag\n )\n \n parse_and_put = BashOperator(\n task_id='parse-panjiva-archievies-and-put-2hdfs',\n bash_command='${AIRFLOW_HOME}/dags/parsing-scripts/parse-panjiva-and-put.sh ',\n dag=dag\n )\n #\n '''\n\n download_import_data = SSHOperator(\n task_id='download-panjiva-import-data',\n remote_host='10.1.25.37',\n ssh_hook=hadoop_hook,\n command=u'/data/demo/download-import-data.sh '\n )\n\n download_export_data = SSHOperator(\n task_id='download-panjiva-export-data',\n remote_host='10.1.25.37',\n ssh_hook=hadoop_hook,\n command=u'/data/demo/download-export-data.sh '\n )\n\n parse_and_put = SSHOperator(\n task_id='parse-panjiva-archievies-and-put-2hdfs',\n remote_host='10.1.25.37',\n ssh_hook=hadoop_hook,\n command=u'/data/demo/parse-panjiva-and-put.sh ',\n dag=dag\n )\n\n starter >> download_export_data >> export_barrier >> parse_and_put\n starter >> download_import_data >> import_barrier >> parse_and_put\n\n upload_exp_data = SSHOperator(\n task_id='upload-export-data-to-hive',\n remote_host='10.1.25.37',\n ssh_hook=hadoop_hook,\n command=u'echo $HOSTNAME',\n dag=dag\n )\n\n upload_imp_data = SSHOperator(\n task_id='upload-import-data-to-hive',\n remote_host='10.1.25.37',\n ssh_hook=hadoop_hook,\n command=u'/data/demo/submit-df-handler.sh ',\n dag=dag\n )\n\n parse_and_put >> wait_unzipping >> upload_exp_data >> wait_uploading\n parse_and_put >> wait_unzipping >> upload_imp_data >> wait_uploading\n\n companies_info = HiveOperator(\n task_id='united-companies-table',\n # hql='${AIRFLOW_HOME}/sql/companies_info.sql ',\n hql='show tables',\n hive_cli_conn_id='hive'\n )\n\n consignments_info = HiveOperator(\n task_id='separate-consignments-table',\n # hql='${AIRFLOW_HOME}/sql/consignments.sql ',\n hql='show tables',\n hive_cli_conn_id='hive'\n )\n\n wait_uploading >> companies_info >> consignments_info >> sql_barrier\n\n hscodes_stats = HiveOperator(\n task_id='hscodes-stats-table',\n # hql='${AIRFLOW_HOME}/sql/import_stats_by_hscodes.sql ',\n hql='show tables',\n hive_cli_conn_id='hive'\n )\n\n companies_hscodes_stats = HiveOperator(\n task_id='hscodes-companies-stats-table',\n # hql='${AIRFLOW_HOME}/sql/import_stats_by_companies_hscodes.sql ',\n hql='show tables',\n hive_cli_conn_id='hive'\n )\n\n sql_barrier >> hscodes_stats >> 
aggregated_stats_barrier\n sql_barrier >> companies_hscodes_stats >> aggregated_stats_barrier\n\n dm_companies = HiveOperator(\n task_id='dm-toplevel-companies-search-table',\n # hql='${AIRFLOW_HOME}/sql/dm_toplevel_companies_search.sql ',\n hql='show tables',\n hive_cli_conn_id='hive'\n )\n\n aggregated_stats_barrier >> dm_companies >> finisher", "sub_path": "dags/panjiva.py", "file_name": "panjiva.py", "file_ext": "py", "file_size_in_byte": 5651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "call"}, {"api_name": "airflow.DAG", "line_number": 28, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 35, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 41, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 47, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 53, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 59, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 65, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 71, "usage_type": "call"}, {"api_name": "airflow.operators.dummy_operator.DummyOperator", "line_number": 77, "usage_type": "call"}, {"api_name": "airflow.contrib.hooks.ssh_hook.SSHHook", "line_number": 83, "usage_type": "call"}, {"api_name": "airflow.contrib.operators.ssh_operator.SSHOperator", "line_number": 112, "usage_type": "call"}, {"api_name": "airflow.contrib.operators.ssh_operator.SSHOperator", "line_number": 119, "usage_type": "call"}, {"api_name": "airflow.contrib.operators.ssh_operator.SSHOperator", "line_number": 126, "usage_type": "call"}, {"api_name": "airflow.contrib.operators.ssh_operator.SSHOperator", "line_number": 137, "usage_type": "call"}, {"api_name": "airflow.contrib.operators.ssh_operator.SSHOperator", "line_number": 145, "usage_type": "call"}, {"api_name": "airflow.operators.hive_operator.HiveOperator", "line_number": 156, "usage_type": "call"}, {"api_name": "airflow.operators.hive_operator.HiveOperator", "line_number": 163, "usage_type": "call"}, {"api_name": "airflow.operators.hive_operator.HiveOperator", "line_number": 172, "usage_type": "call"}, {"api_name": "airflow.operators.hive_operator.HiveOperator", "line_number": 179, "usage_type": "call"}, {"api_name": "airflow.operators.hive_operator.HiveOperator", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "31065313", "text": "from PyQt5 import QtCore, QtGui, QtWidgets #导入模块\n\nclass Ui_Form(object): #创建窗口类,继承object\n def setupUi(self, Form):\n Form.setObjectName(\"Form\") #设置窗口名\n Form.resize(400, 300) #设置窗口大小\n self.quitButton = QtWidgets.QPushButton(Form) #创建一个按钮,并将按钮加入到窗口Form中\n self.quitButton.setGeometry(QtCore.QRect(280, 240, 75, 23)) #设置按钮大小与位置\n self.quitButton.setObjectName(\"quitButton\") #设置按钮名\n self.textBrowser = QtWidgets.QTextBrowser(Form)\n self.textBrowser.setGeometry(QtCore.QRect(20, 60, 256, 61))\n self.textBrowser.setObjectName(\"textBrowser\")\n self.pushButton = QtWidgets.QPushButton(Form)\n self.pushButton.setGeometry(QtCore.QRect(300, 70, 
93, 28))\n self.pushButton.setObjectName(\"pushButton\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form) #关联信号槽\n\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Test\")) #设置窗口标题\n self.quitButton.setText(_translate(\"Form\", \"Quit\")) #设置按钮显示文字", "sub_path": "UI1.py", "file_name": "UI1.py", "file_ext": "py", "file_size_in_byte": 1515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 7, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 8, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTextBrowser", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 13, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "271733732", "text": "import string\n\nimport numpy as np\n\nfrom pymoo.algorithms.nsga2 import NSGA2\nfrom pymoo.model.crossover import Crossover\nfrom pymoo.model.duplicate import ElementwiseDuplicateElimination\nfrom pymoo.model.mutation import Mutation\nfrom pymoo.model.problem import Problem\nfrom pymoo.model.sampling import Sampling\nfrom pymoo.optimize import minimize\nfrom pymoo.visualization.scatter import Scatter\nfrom pyitlib import discrete_random_variable as drv\nimport math\nimport random\nfrom classifier import binaryClassifier\n\n\nclass MyProblem(Problem):\n\n def __init__(self):\n c = binaryClassifier()\n self.classifier = c\n self.stat = []\n self.length_gene = 100\n self.n_genes = 8\n self.bounds = ((17,90),(1,16),(0,1),(0,4),(0,1),(0,99999),(0,4356),(1,99)) # variables' original bounds (int)\n\n super().__init__(n_var=self.n_genes, \n n_obj=2, \n n_constr=1, \n elementwise_evaluation=True)\n\n def _evaluate(self, chromosome, out, *args, **kwargs):\n\n y=[]\n #predict\n #len(chromosome) = 30 (observations)\n for i in range(self.length_gene):\n y.append(self.classifier.predict([chromosome[i]]))\n \n #compute mutual array using y\n M=[]\n ch = np.array(chromosome)\n \n for i in range(self.num_genes):\n M.append(drv.information_mutual(ch[:,i],np.array(y),cartesian_product=True))\n \n #dictionary Variables-MI\n Var = [\"age\",\"education.num\",\"marital.status\",\"race\",\"sex\",\"capital.gain\",\"capital.loss\",\"hours.per.week\"]\n\n d = {\"\".join(Var[0]):M[0]}\n #print(d)\n\n for i in range(len(Var)):\n d[\"\".join(Var[i])] = M[i]\n #print(d)\n\n sort_orders = sorted(d.items(), key=lambda x: 
x[1], reverse=True)\n #print(sort_orders)\n\n best = []\n\n den = 0\n num = 0\n res = 0\n summary = 0\n summary = sum(M)\n threshold = summary * 0.5\n \n M.sort(reverse=True)\n #print(M)\n\n #den = 0\n while num < threshold:\n num=num+M[den]\n den=den+1\n\n for i in range(den):\n best.append(sort_orders[i])\n self.stat.append([res,den,best])\n\n f1 = num\n f2 = -den\n g1 = den<0\n\n out[\"F\"] = [f1, f2]\n out[\"G\"] = [g1]\n\nclass MySampling(Sampling):\n\n def _do(self, problem, n_samples, **kwargs):\n\n pop = []\n\n for i in range (n_samples):\n X = np.full((problem.length_gene, problem.n_genes), 0, int)\n\n for i in range(len(X)):\n for j in range(len(X[i])):\n X[i][j] = random.randint(problem.bounds[j][0],problem.bounds[j][1])\n pop.append(X)\n\n print(\"population:\", pop)\n return pop[0]\n\n# class MyCrossover(Crossover):\n# def __init__(self):\n# super().__init__(2, 2)\n\n# def _do(self, problem, X, parents, **kwargs):\n\n# # a,b = example_parents(2,8)\n\n# # print(\"One Point Crossover\")\n# # off = crossover(get_crossover(\"bin_one_point\"), a, b)\n# # show((off[:n_matings] != a[0]))\n\n# print(\"crossover:\", Y)\n# return Y\n\n\nalgorithm = NSGA2(pop_size=20,\n sampling=MySampling(),\n #crossover=MyCrossover(),\n #mutation=MyMutation(),\n #eliminate_duplicates=MyDuplicateElimination()\n )\n\nres = minimize(MyProblem(),\n algorithm,\n seed=1,\n verbose=True)\n\nScatter().add(res.F).show()\nprint(res.X[np.argsort(res.F[:, 0])])", "sub_path": "pymoo/usage/multi-obj-phd/adult-ml.py", "file_name": "adult-ml.py", "file_ext": "py", "file_size_in_byte": 3654, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pymoo.model.problem.Problem", "line_number": 19, "usage_type": "name"}, {"api_name": "classifier.binaryClassifier", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "pyitlib.discrete_random_variable.information_mutual", "line_number": 47, "usage_type": "call"}, {"api_name": "pyitlib.discrete_random_variable", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "pymoo.model.sampling.Sampling", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.full", "line_number": 97, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 101, "usage_type": "call"}, {"api_name": "pymoo.algorithms.nsga2.NSGA2", "line_number": 123, "usage_type": "call"}, {"api_name": "pymoo.optimize.minimize", "line_number": 130, "usage_type": "call"}, {"api_name": "pymoo.visualization.scatter.Scatter", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "412966233", "text": "import numpy as np\nfrom keras.models import Sequential\nfrom keras.datasets import mnist\nfrom keras.layers import Dense\nfrom keras.models import load_model\nfrom keras import backend\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.utils_keras import KerasModelWrapper\nimport matplotlib.pyplot as plt\n\n#Loss minimizes to 0.0049 over 150 epochs using mean squared error and adam when using epsilon=0.25\n#Loss minimizes to 0.0094 over 150 epochs using mean squared error and adam when using epsilon=0.50\n\n\n#Load MNIST data and normalize to [0,1]\n(data_train, _), (data_test, _) = mnist.load_data()\ndata_train = data_train/255.0\ndata_test = data_test/255.0\n\n#Flatten dataset (New shape for training and 
testing set is (60000,784) and (10000, 784))\ndata_train = data_train.reshape((len(data_train), np.prod(data_train.shape[1:])))\ndata_test = data_test.reshape((len(data_test), np.prod(data_test.shape[1:])))\n\n#Load classifier model whose gradients will be used to create adversarial examples\nkeras_model = load_model('fc-100-100-10.h5')\nbackend.set_learning_phase(False)\n\n#Create adversarial examples on testing data\nsess = backend.get_session()\nepsilon = 0.50\nwrap = KerasModelWrapper(keras_model)\nfgsm = FastGradientMethod(wrap, sess=sess)\nadv_train_x = fgsm.generate_np(data_train, eps=epsilon, clip_min=0., clip_max=1.)\nadv_test_x = fgsm.generate_np(data_test, eps=epsilon, clip_min=0., clip_max=1.)\n\n#Total datasets\ndata_total_train = np.vstack([data_train, adv_train_x])\ndata_total_test = np.vstack([data_test, adv_test_x])\n\n#Create labels that correspond to clean reconstructions\nlabels_total_train = np.vstack([data_train, data_train])\nlabels_total_test = np.vstack([data_test, data_test])\n\n#Create the model\ndef autoencoder():\n\n model = Sequential()\n model.add(Dense(256, activation=None, use_bias=True, kernel_initializer=\"uniform\", input_dim=784))\n model.add(Dense(128, activation=None, kernel_initializer=\"uniform\"))\n model.add(Dense(64, activation=None, kernel_initializer=\"uniform\"))\n model.add(Dense(128, activation=None, kernel_initializer=\"uniform\"))\n model.add(Dense(256, activation=None, kernel_initializer=\"uniform\"))\n model.add(Dense(784, activation=\"sigmoid\", kernel_initializer=\"uniform\"))\n return model\n\n\n\nmodel = autoencoder()\n\n#Compile model using mean squared error as loss and adam as optimizer\nmodel.compile(loss='mean_squared_error', optimizer='adam')\n\n#Train model using input of clean and corrupted data and fit to clean reconstructions only\nmodel.fit(data_total_train, labels_total_train, validation_data=(data_total_test, labels_total_test), epochs=150, batch_size=256, shuffle=True)\n\n#Save the model\n#model.save('pp_auto_encoder_eps50.h5')\n\n#Predict reconstructions of test data\ndecoded_images = model.predict(data_total_test)\n\n#Plot samples of first 15 perturbed images before and after reconstruction\nn = 15 # how many digits we will display\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(data_total_test[i+10000].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(decoded_images[i+10000].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()\n\n\n\n", "sub_path": "Semi-white_Box_Attack_(FGSM)/pp_auto_encoder_fgsm.py", "file_name": "pp_auto_encoder_fgsm.py", "file_ext": "py", "file_size_in_byte": 3398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.backend.set_learning_phase", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 26, "usage_type": "name"}, {"api_name": 
"keras.backend.get_session", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 29, "usage_type": "name"}, {"api_name": "cleverhans.utils_keras.KerasModelWrapper", "line_number": 31, "usage_type": "call"}, {"api_name": "cleverhans.attacks.FastGradientMethod", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "214449927", "text": "import bpy\nimport bgl\nfrom bpy.props import FloatProperty, EnumProperty, BoolProperty\nimport bmesh\nfrom ..utils.developer import output_traceback\nfrom ..utils.ui import wrap_mouse, draw_init, draw_title, draw_prop, draw_end, popup_message, step_collection, step_enum\nfrom ..utils import MACHIN3 as m3\n\n\nmethoditems = [(\"SURFACEPOINT\", \"Surface Point\", \"\"),\n (\"PROJECT\", \"Project\", \"\"),\n (\"VERTEX\", \"Vertex\", \"\")]\n\n\nmethodict = {\"SURFACEPOINT\": \"NEAREST_SURFACEPOINT\",\n \"PROJECT\": \"PROJECT\",\n \"VERTEX\": \"NEAREST_VERTEX\"}\n\n\nclass Conform(bpy.types.Operator):\n bl_idname = \"machin3.conform\"\n bl_label = \"MACHIN3: Conform\"\n bl_options = {'REGISTER', 'UNDO'}\n bl_description = \"Transfer Normals from Stash\"\n\n method = EnumProperty(name=\"Method\", items=methoditems, default=\"SURFACEPOINT\")\n\n shrink_wrap_offset = FloatProperty(name=\"Offset\", default=0)\n\n xray = BoolProperty(name=\"X-Ray\", default=False)\n alpha = FloatProperty(name=\"Alpha\", default=0.2, min=0.1, max=1)\n\n apply_shrink_wrap = 
BoolProperty(name=\"Apply Shrink Wrap\", default=True)\n remove_vgroup = BoolProperty(name=\"Remove Vertex Group\", default=True)\n\n # modal\n allowmodaloffset = BoolProperty(default=False)\n\n @classmethod\n def poll(cls, context):\n active = bpy.context.active_object\n if active.MM.stashes:\n if active.mode == \"EDIT\":\n bm = bmesh.from_edit_mesh(active.data)\n return len([v for v in bm.verts if v.select]) >= 1\n\n def draw_VIEW3D(self, args):\n if self.stash.obj:\n mesh = self.stash.obj.data\n\n mx = self.active.matrix_world\n\n # offset amount depends on size of active object\n offset = sum([d for d in self.active.dimensions]) / 3 * 0.001\n\n alpha = self.alpha\n color = (1.0, 1.0, 1.0)\n\n edgecolor = (*color, alpha)\n edgewidth = 1\n\n bgl.glEnable(bgl.GL_BLEND)\n\n if self.xray:\n bgl.glDisable(bgl.GL_DEPTH_TEST)\n\n for edge in mesh.edges:\n v1 = mesh.vertices[edge.vertices[0]]\n v2 = mesh.vertices[edge.vertices[1]]\n\n # bring the coordinates into world space, and push the verts out a bit\n v1co = mx * (v1.co + v1.normal * offset)\n v2co = mx * (v2.co + v1.normal * offset)\n\n bgl.glLineWidth(edgewidth)\n bgl.glColor4f(*edgecolor)\n\n bgl.glBegin(bgl.GL_LINES)\n\n bgl.glVertex3f(*v1co)\n bgl.glVertex3f(*v2co)\n\n draw_end()\n\n def draw_HUD(self, args):\n draw_init(self, args)\n\n draw_title(self, \"Conform\")\n\n draw_prop(self, \"Stash\", \"%d/%d\" % (self.stash.index + 1, len(self.active.MM.stashes)), key=\"scroll UP/DOWN\")\n self.offset += 10\n\n if self.stash.obj:\n draw_prop(self, \"Offset\", self.shrink_wrap_offset, offset=18, active=self.allowmodaloffset, key=\"MOVE LEFT/RIGHT, toggle W\")\n draw_prop(self, \"Method\", self.method, offset=18, key=\"CTRL scroll UP/DOWN\")\n self.offset += 10\n\n draw_prop(self, \"Alpha\", self.alpha, offset=18, key=\"ALT scroll UP/DOWN\")\n draw_prop(self, \"X-Ray\", self.xray, offset=18, key=\"toggle X\")\n self.offset += 10\n\n draw_prop(self, \"Display\", self.shrink_wrap.show_viewport, offset=18, key=\"toggle D\")\n\n self.offset += 10\n draw_prop(self, \"Apply Mod\", self.apply_shrink_wrap, offset=18, key=\"toggle A\")\n if self.apply_shrink_wrap:\n draw_prop(self, \"Remove VGroup\", self.remove_vgroup, offset=18, key=\"toggle R\")\n else:\n draw_prop(self, \"INVALID\", \"Stash Object Not Found\", offset=18, HUDcolor=(1, 0, 0))\n\n draw_end()\n\n def modal(self, context, event):\n context.area.tag_redraw()\n\n # update mouse postion for HUD\n if event.type == \"MOUSEMOVE\":\n self.mouse_x = event.mouse_region_x\n self.mouse_y = event.mouse_region_y\n\n # only consider MOUSEMOVE as a trigger for main(), when modaloffset is active\n if self.allowmodaloffset:\n if event.type == \"MOUSEMOVE\":\n delta_x = self.mouse_x - self.init_mouse_x\n wrap_mouse(self, context, event, x=True)\n\n self.shrink_wrap_offset = delta_x * 0.001\n self.shrink_wrap.offset = self.shrink_wrap_offset\n\n # SELECT stash, CHANGE alpha\n\n if event.type in {'WHEELUPMOUSE', 'ONE'} and event.value == 'PRESS':\n if event.alt:\n self.alpha += 0.1\n elif event.ctrl:\n self.method = step_enum(self.method, methoditems, 1)\n self.shrink_wrap.wrap_method = methodict[self.method]\n else:\n self.stash = step_collection(self.active.MM, self.stash, \"stashes\", \"active_stash_idx\", 1)\n\n # check if the active object has moved, because shrinkwrap doesnt work in local space like data transfer, so the locations need to match\n if self.stash.obj.matrix_world != self.active.matrix_world:\n self.stash.obj.matrix_world = self.active.matrix_world\n\n self.shrink_wrap.target = 
self.stash.obj\n\n elif event.type in {'WHEELDOWNMOUSE', 'TWO'} and event.value == 'PRESS':\n if event.alt:\n self.alpha -= 0.1\n elif event.ctrl:\n self.method = step_enum(self.method, methoditems, -1)\n self.shrink_wrap.wrap_method = methodict[self.method]\n else:\n self.stash = step_collection(self.active.MM, self.stash, \"stashes\", \"active_stash_idx\", -1)\n\n # check if the active object has moved, because shrinkwrap doesnt work in local space like data transfer, so the locations need to match\n if self.stash.obj.matrix_world != self.active.matrix_world:\n self.stash.obj.matrix_world = self.active.matrix_world\n\n self.shrink_wrap.target = self.stash.obj\n\n # STASH OBJ\n\n if self.stash.obj:\n\n # TOGGLE display mod and xray\n\n if event.type == 'X' and event.value == 'PRESS':\n self.xray = not self.xray\n\n elif event.type == 'D' and event.value == 'PRESS':\n self.shrink_wrap.show_viewport = not self.shrink_wrap.show_viewport\n\n # TOGGLE apply mod and remove vgroup\n\n elif event.type == 'A' and event.value == 'PRESS':\n self.apply_shrink_wrap = not self.apply_shrink_wrap\n\n elif event.type == 'R' and event.value == 'PRESS':\n self.remove_vgroup = not self.remove_vgroup\n\n elif event.type == 'W' and event.value == \"PRESS\":\n if event.alt:\n self.shrink_wrap_offset = 0\n self.shrink_wrap.offset = self.shrink_wrap_offset\n self.allowmodaloffset = False\n else:\n self.allowmodaloffset = not self.allowmodaloffset\n\n # VIEWPORT control\n\n if event.type in {'MIDDLEMOUSE'}:\n return {'PASS_THROUGH'}\n\n # FINISH\n\n elif event.type in ['LEFTMOUSE', 'SPACE']:\n bpy.types.SpaceView3D.draw_handler_remove(self.VIEW3D, 'WINDOW')\n bpy.types.SpaceView3D.draw_handler_remove(self.HUD, 'WINDOW')\n\n if self.stash.obj:\n if self.apply_shrink_wrap:\n print(\" » Applying modifier '%s' to object '%s'.\" % (self.shrink_wrap.name, self.active.name))\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=self.shrink_wrap.name)\n\n if self.remove_vgroup:\n print(\" » Removing vertex group: %s\" % (self.vgroup.name))\n self.active.vertex_groups.remove(self.vgroup)\n else:\n print(\" » Removing modifier '%s' from object '%s'.\" % (self.shrink_wrap.name, self.active.name))\n self.active.modifiers.remove(self.shrink_wrap)\n print(\" » Removing vertex group: %s\" % (self.vgroup.name))\n self.active.vertex_groups.remove(self.vgroup)\n\n m3.set_mode(\"EDIT\")\n return {'FINISHED'}\n\n # CANCEL\n\n elif event.type in {'RIGHTMOUSE', 'ESC'}:\n bpy.types.SpaceView3D.draw_handler_remove(self.VIEW3D, 'WINDOW')\n bpy.types.SpaceView3D.draw_handler_remove(self.HUD, 'WINDOW')\n\n print(\" » Removing modifier '%s' from object '%s'.\" % (self.shrink_wrap.name, self.active.name))\n self.active.modifiers.remove(self.shrink_wrap)\n print(\" » Removing vertex group: %s\" % (self.vgroup.name))\n self.active.vertex_groups.remove(self.vgroup)\n\n m3.set_mode(\"EDIT\")\n return {'CANCELLED'}\n\n return {'RUNNING_MODAL'}\n\n def invoke(self, context, event):\n self.active = m3.get_active()\n\n self.vgroup, self.shrink_wrap = self.main(self.active)\n self.stash = self.active.MM.stashes[self.active.MM.active_stash_idx]\n\n # mouse positions\n self.mouse_x = self.init_mouse_x = self.fixed_mouse_x = event.mouse_region_x\n self.mouse_y = self.init_mouse_y = self.fixed_mouse_y = event.mouse_region_y\n\n args = (self, context)\n self.VIEW3D = bpy.types.SpaceView3D.draw_handler_add(self.draw_VIEW3D, (args, ), 'WINDOW', 'POST_VIEW')\n self.HUD = bpy.types.SpaceView3D.draw_handler_add(self.draw_HUD, (args, ), 'WINDOW', 
'POST_PIXEL')\n\n context.window_manager.modal_handler_add(self)\n return {'RUNNING_MODAL'}\n\n def execute(self, context):\n active = m3.get_active()\n\n vgroup, shrink_wrap = self.main(active)\n\n print(\" » Applying modifier '%s' to object '%s'.\" % (shrink_wrap.name, active.name))\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=shrink_wrap.name)\n\n print(\" » Removing vertex group: %s\" % (vgroup.name))\n active.vertex_groups.remove(vgroup)\n\n m3.set_mode(\"EDIT\")\n return {'FINISHED'}\n\n def main(self, active):\n vert_ids = m3.get_selection(\"VERT\")\n\n m3.set_mode(\"OBJECT\")\n active.show_wire = True\n active.show_all_edges = True\n\n vgroup = self.add_vgroup(active, vert_ids, \"conform\")\n stash = active.MM.stashes[active.MM.active_stash_idx]\n stashobj = stash.obj\n\n # check if the active object has moved, because shrinkwrap doesnt work in local space like data transfer, so the locations need to match\n if stashobj.matrix_world != active.matrix_world:\n stashobj.matrix_world = active.matrix_world\n\n shrink_wrap = self.add_shrink_wrap_mod(active, stashobj, \"conform\", vgroup, self.shrink_wrap_offset, methodict[self.method])\n return vgroup, shrink_wrap\n\n def add_shrink_wrap_mod(self, obj, target, name, vgroup, offset, method):\n # add shrinkwrap mod\n shrink_wrap = obj.modifiers.new(name, \"SHRINKWRAP\")\n shrink_wrap.target = target\n shrink_wrap.vertex_group = vgroup.name\n shrink_wrap.offset = offset\n shrink_wrap.wrap_method = method\n\n shrink_wrap.show_expanded = False\n\n print(\" » Added modifier '%s' to object '%s'.\" % (name, obj.name))\n\n return shrink_wrap\n\n def add_vgroup(self, obj, vert_ids, name):\n vgroup = obj.vertex_groups.new(name=name)\n print(\" » Created new vertex group: %s\" % (name))\n\n vgroup.add(vert_ids, 1, \"ADD\")\n return vgroup\n", "sub_path": "All_In_One/addons/MESHmachine/operators/conform.py", "file_name": "conform.py", "file_ext": "py", "file_size_in_byte": 11629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "bpy.types", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 26, "usage_type": "call"}, {"api_name": "bpy.props.FloatProperty", "line_number": 28, "usage_type": "call"}, {"api_name": "bpy.props.BoolProperty", "line_number": 30, "usage_type": "call"}, {"api_name": "bpy.props.FloatProperty", "line_number": 31, "usage_type": "call"}, {"api_name": "bpy.props.BoolProperty", "line_number": 33, "usage_type": "call"}, {"api_name": "bpy.props.BoolProperty", "line_number": 34, "usage_type": "call"}, {"api_name": "bpy.props.BoolProperty", "line_number": 37, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 41, "usage_type": "attribute"}, {"api_name": "bmesh.from_edit_mesh", "line_number": 44, "usage_type": "call"}, {"api_name": "bgl.glEnable", "line_number": 62, "usage_type": "call"}, {"api_name": "bgl.GL_BLEND", "line_number": 62, "usage_type": "attribute"}, {"api_name": "bgl.glDisable", "line_number": 65, "usage_type": "call"}, {"api_name": "bgl.GL_DEPTH_TEST", "line_number": 65, "usage_type": "attribute"}, {"api_name": "bgl.glLineWidth", "line_number": 75, "usage_type": "call"}, {"api_name": "bgl.glColor4f", "line_number": 76, "usage_type": "call"}, {"api_name": "bgl.glBegin", "line_number": 78, "usage_type": "call"}, {"api_name": "bgl.GL_LINES", "line_number": 78, "usage_type": "attribute"}, {"api_name": "bgl.glVertex3f", "line_number": 80, "usage_type": "call"}, {"api_name": 
"bgl.glVertex3f", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.ui.draw_end", "line_number": 83, "usage_type": "call"}, {"api_name": "utils.ui.draw_init", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.ui.draw_title", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 95, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.ui.draw_prop", "line_number": 109, "usage_type": "call"}, {"api_name": "utils.ui.draw_end", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.ui.wrap_mouse", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.ui.step_enum", "line_number": 136, "usage_type": "call"}, {"api_name": "utils.ui.step_collection", "line_number": 139, "usage_type": "call"}, {"api_name": "utils.ui.step_enum", "line_number": 151, "usage_type": "call"}, {"api_name": "utils.ui.step_collection", "line_number": 154, "usage_type": "call"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_remove", "line_number": 198, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 198, "usage_type": "attribute"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_remove", "line_number": 199, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 199, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 204, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 204, "usage_type": "attribute"}, {"api_name": "utils.MACHIN3.set_mode", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 215, "usage_type": "name"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_remove", "line_number": 221, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 221, "usage_type": "attribute"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_remove", "line_number": 222, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 222, "usage_type": "attribute"}, {"api_name": "utils.MACHIN3.set_mode", "line_number": 229, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 229, "usage_type": "name"}, {"api_name": "utils.MACHIN3.get_active", "line_number": 235, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 235, "usage_type": "name"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_add", "line_number": 245, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 245, "usage_type": "attribute"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_add", "line_number": 246, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 246, "usage_type": "attribute"}, {"api_name": "utils.MACHIN3.get_active", "line_number": 252, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 252, "usage_type": "name"}, {"api_name": "bpy.ops.object.modifier_apply", "line_number": 257, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 257, "usage_type": "attribute"}, {"api_name": "utils.MACHIN3.set_mode", "line_number": 262, "usage_type": "call"}, {"api_name": 
"utils.MACHIN3", "line_number": 262, "usage_type": "name"}, {"api_name": "utils.MACHIN3.get_selection", "line_number": 266, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 266, "usage_type": "name"}, {"api_name": "utils.MACHIN3.set_mode", "line_number": 268, "usage_type": "call"}, {"api_name": "utils.MACHIN3", "line_number": 268, "usage_type": "name"}]} +{"seq_id": "13130807", "text": "from django.test import TestCase\n\nfrom django.test import TestCase, Client\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom .models import Lugares\nfrom .serializers import LugaresSerializer\n\nimport json\n\n# Create your tests here.\n\nclass LugaresTest(TestCase):\n def setUp(self):\n self.client = Client()\n self.primer_lugar = Lugares.objects.create(\n nombre=\"Lugar1\",\n calle=\"calle1\",\n colonia=\"colonia1\"\n ) \n self.segundo_lugar = Lugares.objects.create(\n nombre=\"Lugar2\",\n calle=\"calle2\",\n colonia=\"colonia2\"\n )\n\n def test_get_all_lugares(self):\n response = self.client.get(reverse(\"lugares_endpoint\"))\n lugares = Lugares.objects.all()\n serializer = LugaresSerializer(lugares, many=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(serializer.data, response.data)\n \n", "sub_path": "apiDamificados/lugares/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.test.TestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Lugares.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Lugares.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Lugares", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Lugares.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Lugares.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Lugares", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Lugares.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Lugares.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Lugares", "line_number": 29, "usage_type": "name"}, {"api_name": "serializers.LugaresSerializer", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "47278236", "text": "#!/usr/bin/env python\n# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-\n#\n# NetProfile: Geo module\n# © Copyright 2013 Nikita Andriyanov\n# © Copyright 2013-2014 Alex 'Unik' Unigovsky\n#\n# This file is part of NetProfile.\n# NetProfile is free software: you can redistribute it and/or\n# modify it under the terms of the GNU Affero General Public\n# License as published by the Free Software Foundation, either\n# version 3 of the License, or (at your option) any later\n# version.\n#\n# NetProfile is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General\n# Public License along with NetProfile. 
If not, see\n# .\n\nfrom __future__ import (\n\tunicode_literals,\n\tprint_function,\n\tabsolute_import,\n\tdivision\n)\n\nfrom netprofile.common.modules import ModuleBase\n\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom pyramid.i18n import TranslationStringFactory\n\n_ = TranslationStringFactory('netprofile_geo')\n\nclass Module(ModuleBase):\n\tdef __init__(self, mmgr):\n\t\tself.mmgr = mmgr\n\t\tmmgr.cfg.add_translation_dirs('netprofile_geo:locale/')\n\t\tmmgr.cfg.scan()\n\n\t@classmethod\n\tdef get_models(cls):\n\t\tfrom netprofile_geo import models\n\t\treturn (\n\t\t\tmodels.City,\n\t\t\tmodels.District,\n\t\t\tmodels.Street,\n\t\t\tmodels.House,\n\t\t\tmodels.Place,\n\t\t\tmodels.HouseGroup,\n\t\t\tmodels.HouseGroupMapping\n\t\t)\n\n\t@classmethod\n\tdef get_sql_functions(cls):\n\t\tfrom netprofile_geo import models\n\t\treturn (\n\t\t\tmodels.AddrFormatCompactFunction,\n\t\t\tmodels.AddrFormatFunction,\n\t\t\tmodels.AddrGetFullFunction,\n\t\t\tmodels.AddrListDistrictProcedure,\n\t\t\tmodels.AddrListEntrProcedure,\n\t\t\tmodels.AddrListStreetProcedure\n\t\t)\n\n\t@classmethod\n\tdef get_sql_views(cls):\n\t\tfrom netprofile_geo import models\n\t\treturn (\n\t\t\tmodels.AddrCompactView,\n\t\t\tmodels.AddrExtraView,\n\t\t\tmodels.AddrFullView,\n\t\t\tmodels.AddrStreetNamesView\n\t\t)\n\n\t@classmethod\n\tdef get_sql_data(cls, modobj, sess):\n\t\tfrom netprofile_core.models import (\n\t\t\tGroup,\n\t\t\tGroupCapability,\n\t\t\tPrivilege\n\t\t)\n\n\t\tprivs = (\n\t\t\tPrivilege(\n\t\t\t\tcode='BASE_GEO',\n\t\t\t\tname='Access: Addresses'\n\t\t\t),\n\t\t\tPrivilege(\n\t\t\t\tcode='GEO_LIST',\n\t\t\t\tname='Addresses: List'\n\t\t\t),\n\t\t\tPrivilege(\n\t\t\t\tcode='GEO_CREATE',\n\t\t\t\tname='Addresses: Create'\n\t\t\t),\n\t\t\tPrivilege(\n\t\t\t\tcode='GEO_EDIT',\n\t\t\t\tname='Addresses: Edit'\n\t\t\t),\n\t\t\tPrivilege(\n\t\t\t\tcode='GEO_DELETE',\n\t\t\t\tname='Addresses: Delete'\n\t\t\t)\n\t\t)\n\t\tfor priv in privs:\n\t\t\tpriv.module = modobj\n\t\t\tsess.add(priv)\n\t\ttry:\n\t\t\tgrp_admins = sess.query(Group).filter(Group.name == 'Administrators').one()\n\t\t\tfor priv in privs:\n\t\t\t\tcap = GroupCapability()\n\t\t\t\tcap.group = grp_admins\n\t\t\t\tcap.privilege = priv\n\t\texcept NoResultFound:\n\t\t\tpass\n\n\tdef get_local_js(self, request, lang):\n\t\treturn (\n\t\t\t'netprofile_geo:static/webshell/locale/webshell-lang-' + lang + '.js',\n\t\t)\n\n\tdef get_autoload_js(self, request):\n\t\treturn (\n\t\t\t'NetProfile.geo.form.field.Address',\n\t\t)\n\n\tdef get_css(self, request):\n\t\treturn (\n\t\t\t'netprofile_geo:static/css/main.css',\n\t\t)\n\n\t@property\n\tdef name(self):\n\t\treturn _('Geography')\n\n", "sub_path": "netprofile_geo/netprofile_geo/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyramid.i18n.TranslationStringFactory", "line_number": 36, "usage_type": "call"}, {"api_name": "netprofile.common.modules.ModuleBase", "line_number": 38, "usage_type": "name"}, {"api_name": "netprofile_geo.models.City", "line_number": 48, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 48, "usage_type": "name"}, {"api_name": "netprofile_geo.models.District", "line_number": 49, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 49, "usage_type": "name"}, {"api_name": "netprofile_geo.models.Street", "line_number": 50, "usage_type": "attribute"}, 
{"api_name": "netprofile_geo.models", "line_number": 50, "usage_type": "name"}, {"api_name": "netprofile_geo.models.House", "line_number": 51, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 51, "usage_type": "name"}, {"api_name": "netprofile_geo.models.Place", "line_number": 52, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 52, "usage_type": "name"}, {"api_name": "netprofile_geo.models.HouseGroup", "line_number": 53, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 53, "usage_type": "name"}, {"api_name": "netprofile_geo.models.HouseGroupMapping", "line_number": 54, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 54, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrFormatCompactFunction", "line_number": 61, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 61, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrFormatFunction", "line_number": 62, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 62, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrGetFullFunction", "line_number": 63, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 63, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrListDistrictProcedure", "line_number": 64, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 64, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrListEntrProcedure", "line_number": 65, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 65, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrListStreetProcedure", "line_number": 66, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 66, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrCompactView", "line_number": 73, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 73, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrExtraView", "line_number": 74, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 74, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrFullView", "line_number": 75, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 75, "usage_type": "name"}, {"api_name": "netprofile_geo.models.AddrStreetNamesView", "line_number": 76, "usage_type": "attribute"}, {"api_name": "netprofile_geo.models", "line_number": 76, "usage_type": "name"}, {"api_name": "netprofile_core.models.Privilege", "line_number": 88, "usage_type": "call"}, {"api_name": "netprofile_core.models.Privilege", "line_number": 92, "usage_type": "call"}, {"api_name": "netprofile_core.models.Privilege", "line_number": 96, "usage_type": "call"}, {"api_name": "netprofile_core.models.Privilege", "line_number": 100, "usage_type": "call"}, {"api_name": "netprofile_core.models.Privilege", "line_number": 104, "usage_type": "call"}, {"api_name": "netprofile_core.models.Group", "line_number": 113, "usage_type": "name"}, {"api_name": "netprofile_core.models.Group.name", "line_number": 113, "usage_type": "attribute"}, {"api_name": "netprofile_core.models.GroupCapability", "line_number": 115, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "163942504", "text": "#!/usr/bin/env python3\n\nimport os\nimport 
discord\nimport logging\nimport json\nfrom bot import Bot\nfrom discord.ext import commands\n\n\n# Finde den Prefix eines Servers\ndef get_prefix(bot, message):\n if message.guild is not None:\n try:\n with open('./data/prefixes.json', 'r') as f:\n prefixes = json.load(f)\n return prefixes[str(message.guild.id)]\n except KeyError:\n return '!'\n else:\n return '!'\n\n\nbot = Bot(command_prefix=get_prefix)\n\n\n# Botowner für Permissions zu manchen Commands\ndef botowner(ctx):\n if ctx.author.id == 296323983819669514 or ctx.author.id == 137291894953607168:\n return True\n else:\n return False\n\n\n# Bot herunterfahren\n@bot.command()\n@commands.check(botowner)\nasync def shutdown(ctx):\n await ctx.channel.purge(limit=1)\n await ctx.send(\"Bot wird heruntergefahren...\")\n await bot.logout()\n\n\n@bot.command()\nasync def ping(ctx):\n await ctx.channel.purge(limit=1)\n await ctx.send(f'Pong! Meine Latenz sind aktuell {round(bot.latency * 1000)} ms.')\n\n\n# Modul laden\n@bot.command()\n@commands.check(botowner)\nasync def load(ctx, extension):\n await ctx.channel.purge(limit=1)\n e = extension.lower()\n bot.load_extension(f'cogs.{e}')\n await ctx.send(e + \"aktiviert\")\n print(e + ' aktiviert')\n\n\n# Modul deaktivieren\n@bot.command()\n@commands.check(botowner)\nasync def unload(ctx, extension):\n await ctx.channel.purge(limit=1)\n e = extension.lower()\n bot.unload_extension(f'cogs.{e}')\n print(e + ' deaktiviert')\n await ctx.send(e + ' deaktiviert')\n\n\n# Modul neuladen\n@bot.command()\n@commands.check(botowner)\nasync def reload(ctx, extension):\n await ctx.channel.purge(limit=1)\n e = extension.lower()\n bot.reload_extension(f'cogs.{e}')\n print(e + ' neugeladen')\n await ctx.send(e + ' neugeladen')\n\n\n# Beim start alle module laden die nicht mit test starten\nfor filename in os.listdir('./cogs'):\n if filename.endswith(\".py\"):\n if filename.startswith('test'):\n try:\n bot.load_extension(f'cogs.{filename[:-3]}')\n except Exception:\n print(F'{filename}' + ' ist fehlerhaft')\n else:\n if filename.endswith('.py'):\n bot.load_extension(f'cogs.{filename[:-3]}')\n print(filename[:-3] + ' aktiviert')\n elif filename.endswith('__pycache__'):\n print('Py-Cache gefunden')\n else:\n print(F'{filename}' + ' ist fehlerhaft')\n else:\n pass\n\nbot.run(os.environ['TUMBOT_TOKEN'])", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "bot.Bot", "line_number": 24, "usage_type": "call"}, {"api_name": "bot.logout", "line_number": 41, "usage_type": "call"}, {"api_name": "bot.command", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.ext.commands.check", "line_number": 37, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "bot.latency", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bot.command", "line_number": 44, "usage_type": "call"}, {"api_name": "bot.load_extension", "line_number": 56, "usage_type": "call"}, {"api_name": "bot.command", "line_number": 51, "usage_type": "call"}, {"api_name": "discord.ext.commands.check", "line_number": 52, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 52, "usage_type": "name"}, {"api_name": "bot.unload_extension", "line_number": 67, "usage_type": "call"}, {"api_name": "bot.command", "line_number": 62, "usage_type": 
"call"}, {"api_name": "discord.ext.commands.check", "line_number": 63, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 63, "usage_type": "name"}, {"api_name": "bot.reload_extension", "line_number": 78, "usage_type": "call"}, {"api_name": "bot.command", "line_number": 73, "usage_type": "call"}, {"api_name": "discord.ext.commands.check", "line_number": 74, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 74, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 84, "usage_type": "call"}, {"api_name": "bot.load_extension", "line_number": 88, "usage_type": "call"}, {"api_name": "bot.load_extension", "line_number": 93, "usage_type": "call"}, {"api_name": "bot.run", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "242579928", "text": "# Copyright 2020, Couchbase, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom flaky import flaky\n\nfrom couchbase_tests.base import CollectionTestCase, SkipTest, skip_if_no_collections\nfrom couchbase.exceptions import InvalidArgumentException, SearchIndexNotFoundException\nfrom couchbase.management.search import SearchIndex\nimport uuid\n\n\n@flaky(10,3)\nclass SearchIndexManagerTestCase(CollectionTestCase):\n def setUp(self):\n super(SearchIndexManagerTestCase, self).setUp()\n if not self.is_realserver:\n raise SkipTest('no management tests for mock')\n\n self.indexmgr = self.cluster.search_indexes()\n self.assertIsNotNone(self.indexmgr)\n self.indexname = \"idx-{}\".format(str(uuid.uuid4()))\n try:\n self.indexmgr.drop_index(self.indexname)\n except SearchIndexNotFoundException:\n # maybe it isn't there, that's ok\n pass\n finally:\n # now lets wait till it really seems gone...\n self.try_n_times_till_exception(10, 3, self.indexmgr.get_index, self.indexname)\n\n # Now lets create a new one\n self.indexmgr.upsert_index(SearchIndex(name=self.indexname, source_name='default'))\n\n # insure it is there before we begin test\n self.try_n_times(10, 3, self.indexmgr.get_index, self.indexname)\n\n # seems strange, but even that above can still lead to a INDEX_NOT_FOUND in\n # jenkins at least. Perhaps the http round-robins the calls and hits a machine\n # on the cluster that has not gotten it yet? \n # A guess here - maybe we need to have it succeed once per server (assuming it\n # round-robins. 
HACK - lets just see...\n self.try_n_times(10, 3, self.indexmgr.get_index, self.indexname)\n self.try_n_times(10, 3, self.indexmgr.get_index, self.indexname)\n\n def tearDown(self):\n try:\n self.indexmgr.drop_index(self.indexname)\n except:\n pass\n\n def test_ingestion_control(self):\n # can't easily test this, but lets at least call them and insure we get no\n # exceptions\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.pause_ingest, self.indexname))\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.resume_ingest, self.indexname))\n\n def test_query_control(self):\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.disallow_querying, self.indexname))\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.allow_querying, self.indexname))\n\n def test_plan_freeze_control(self):\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.freeze_plan, self.indexname))\n self.assertIsNone(self.try_n_times(10, 3, self.indexmgr.unfreeze_plan, self.indexname))\n\n def test_get_indexed_document_count(self):\n # just be sure we get something back. NOTE: immediately after creation,\n # the document count can give an exception. So... lets try a few times\n # with a sleep.\n self.assertIsNotNone(self.try_n_times(5, 2, self.indexmgr.get_indexed_documents_count, self.indexname))\n\n def test_drop_index(self):\n # you may not be able to drop an index immediately after creating it, so\n # lets retry it till successful.\n self.try_n_times(10, 3, self.indexmgr.drop_index, self.indexname)\n self.try_n_times_till_exception(10, 3, self.indexmgr.get_index, self.indexname)\n self.assertRaises(SearchIndexNotFoundException, self.indexmgr.get_index, self.indexname)\n\n def test_get_all_indexes(self):\n # we know of one, lets make sure it is in the list\n indexes = self.try_n_times(10, 3, self.indexmgr.get_all_indexes)\n for idx in indexes:\n if idx.name == self.indexname:\n return;\n self.fail('did not find {} as expected'.format(self.indexname))\n\n def test_get_index(self):\n index = self.try_n_times(10, 3, self.indexmgr.get_index, self.indexname)\n self.assertIsNotNone(index)\n\n def test_get_index_fail_no_index_name(self):\n self.assertRaises(InvalidArgumentException, self.indexmgr.get_index, None)\n\n def test_get_index_fail(self):\n self.assertRaises(SearchIndexNotFoundException, self.indexmgr.get_index, 'foo')\n\n def test_upsert_index(self):\n index = self.try_n_times(10, 3, self.indexmgr.get_index, self.indexname)\n self.assertIsNone(\n self.indexmgr.upsert_index(SearchIndex(uuid=index.uuid, name=self.indexname, source_name='default')))\n\n @skip_if_no_collections\n def test_analyze_doc(self):\n # like getting the doc count, this can fail immediately after index creation\n doc = {\"field\": \"I got text in here\"}\n analysis = self.try_n_times(5, 2, self.indexmgr.analyze_document, self.indexname, doc)\n self.assertIsNotNone(analysis)\n self.assertEquals(analysis['status'], 'ok')\n", "sub_path": "couchbase/tests_v3/cases/searchmgmt_t.py", "file_name": "searchmgmt_t.py", "file_ext": "py", "file_size_in_byte": 5471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "couchbase_tests.base.CollectionTestCase", "line_number": 24, "usage_type": "name"}, {"api_name": "couchbase_tests.base.SkipTest", "line_number": 28, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 32, "usage_type": "call"}, {"api_name": "couchbase.exceptions.SearchIndexNotFoundException", "line_number": 35, "usage_type": "name"}, 
{"api_name": "couchbase.management.search.SearchIndex", "line_number": 43, "usage_type": "call"}, {"api_name": "couchbase.exceptions.SearchIndexNotFoundException", "line_number": 87, "usage_type": "argument"}, {"api_name": "couchbase.exceptions.InvalidArgumentException", "line_number": 102, "usage_type": "argument"}, {"api_name": "couchbase.exceptions.SearchIndexNotFoundException", "line_number": 105, "usage_type": "argument"}, {"api_name": "couchbase.management.search.SearchIndex", "line_number": 110, "usage_type": "call"}, {"api_name": "couchbase_tests.base.skip_if_no_collections", "line_number": 112, "usage_type": "name"}, {"api_name": "flaky.flaky", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "450051395", "text": "import cv2\nimport sqlite3\ncam = cv2.VideoCapture(0)\ndetector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef insertOrUpdate(Id,Name):\n conn=sqlite3.connect(\"Database.db\")\n cmd = \"SELECT * FROM Check WHERE ID =\"+str(Id )\n cursor = conn.execute(cmd)\n isRecordExist=0\n for row in cursor:\n isRecordExist =1\n if(isRecordExist ==1):\n cmd = \"UPDATE ID SET NAME=\"+srt(Name)+\"WHERE ID=\"+srt(Id)\n else:\n cmd= \"INSERT INTO Check Attendance(ID, NAME) Values(\"+str(Id)+\",'\"+str(Name)+\"')\"\n conn.execute(cmd)\n conn.commit()\n\n\nid=input('enter your id')\nname=input('enter your name')\nsampleNum=0\nwhile True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = detector.detectMultiScale(gray, scaleFactor= 1.2, minNeighbors =5, minSize=(30,30))\n for(x,y,w,h) in faces:\n sampleNum = sampleNum+1\n cv2.imwrite(\"dataset/user.\"+id+'.'+str(sampleNum)+ \".jpg\", gray[y:y+h, x:x+w])\n cv2.rectangle(im, (x-50,y-50),(x+w+50, y+h+50),(255,0,0),2)\n cv2.imshow('im',im)\n cv2.waitKey(100)\n if sampleNum > 20:\n cam.release()\n cv2.destroyAllWindows()\n break\n", "sub_path": "Quasars/Open CV and Python/datagenerator.py", "file_name": "datagenerator.py", "file_ext": "py", "file_size_in_byte": 1179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 4, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "31813847", "text": "\"\"\"\nUsing single Us4OEM to acquire a single STA sequence.\n\nIn this example:\n\n- we configure Us4OEM,\n- we define STA-like sequence of firings using single-element Tx aperture,\n stride 1,\n- run the sequence and acquire a single RF frame.\n\"\"\"\n\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport arrus\nimport itertools\nfrom arrus.ops import (\n Tx, Rx, TxRx,\n Sequence, SetHVVoltage\n)\nfrom arrus import (\n SineWave,\n SingleElementAperture,\n RegionBasedAperture,\n CustomUs4RCfg,\n Us4OEMCfg,\n SessionCfg\n)\n\n\ndef main():\n # -- DEVICE CONFIGURATION.\n\n # Prepare system description.\n # Customize this configuration for 
your setup.\n system_cfg = CustomUs4RCfg(\n n_us4oems=2,\n is_hv256=True\n )\n # Prepare Us4OEM initial configuration.\n us4oem_cfg = Us4OEMCfg(\n channel_mapping=\"esaote\",\n active_channel_groups=[1]*16,\n dtgc=0,\n active_termination=200,\n log_transfer_time=True\n )\n\n # -- PROGRAMMING TX/RX SEQUENCE.\n n_firings_per_frame = 4\n n_frames = 128\n n_samples = 4*1024\n\n def get_full_rx_aperture(element_number):\n \"\"\"\n This function creates a sequence of 4 Tx/Rx's with Tx aperture\n containing a single active element ``element_number``.\n The sequence allow to acquire a single frame using 128 Rx channels.\n \"\"\"\n operations = []\n for i in range(n_firings_per_frame):\n tx = Tx(excitation=SineWave(frequency=8.125e6, n_periods=1.5,\n inverse=False),\n aperture=RegionBasedAperture(origin=element_number, size=1),\n pri=300e-6)\n rx = Rx(n_samples=n_samples,\n fs_divider=2,\n aperture=RegionBasedAperture(i*32, 32),\n rx_time=260e-6,\n rx_delay=5e-6)\n txrx = TxRx(tx, rx)\n operations.append(txrx)\n return operations\n\n tx_rx_sequence = Sequence(list(itertools.chain(*[\n get_full_rx_aperture(channel)\n for channel in range(n_frames)\n ])))\n\n # -- RUNNING TX/RX SEQUENCE\n\n # Configure and create communication session with the device.\n session_cfg = SessionCfg(\n system=system_cfg,\n devices={\n \"Us4OEM:0\": us4oem_cfg,\n }\n )\n with arrus.Session(cfg=session_cfg) as sess:\n # Enable high voltage supplier.\n hv256 = sess.get_device(\"/HV256\")\n # Get first available Us4OEM module.\n us4oem = sess.get_device(\"/Us4OEM:0\")\n\n # Set voltage on HV256.\n sess.run(SetHVVoltage(50), feed_dict=dict(device=hv256))\n\n # Acquire a single RF frame of shape\n # (N_OPERATIONS*N_SAMPLES, N_RX_CHANNELS).\n frame = sess.run(tx_rx_sequence, feed_dict=dict(device=us4oem))\n\n # Reshape acquired data:\n # - from (N_FRAMES * N_FIRING_PER_FRAME * N_SAMPLES, N_RX_CHANNELS)\n # that is: (N_OPERATIONS*N_SAMPLES, N_RX_CHANNELS)\n # - to (N_FRAMES, N_SAMPLES, N_FIRING_PER_FRAME * N_RX_CHANNELS)\n frame = frame.reshape((n_frames*n_firings_per_frame,\n n_samples,\n us4oem.get_n_rx_channels()))\n frame = frame.transpose((0, 2, 1))\n frame = frame.reshape((n_frames,\n n_firings_per_frame*us4oem.get_n_rx_channels(),\n n_samples))\n frame = frame.transpose((0, 2, 1))\n # Display the data using matplotlib.\n display_acquired_frame(frame)\n\n\ndef display_acquired_frame(rf, window_sizes=(7, 7)):\n fig, ax = plt.subplots()\n fig.set_size_inches(window_sizes)\n\n ax.set_xlabel(\"Channels\")\n ax.set_ylabel(\"Samples\")\n fig.canvas.set_window_title(\"RF data\")\n\n canvas = plt.imshow(rf[0, :, :],\n vmin=np.iinfo(np.int16).min,\n vmax=np.iinfo(np.int16).max)\n fig.show()\n\n for frame_number in range(rf.shape[0]):\n canvas.set_data(rf[frame_number, :, :])\n ax.set_aspect(\"auto\")\n fig.canvas.flush_events()\n ax.set_xlabel(f\"Channels (tx: {frame_number})\")\n plt.draw()\n time.sleep(0.1)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "api/python/examples/us4oem/us4oem_x1_sta_single.py", "file_name": "us4oem_x1_sta_single.py", "file_ext": "py", "file_size_in_byte": 4264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "arrus.CustomUs4RCfg", "line_number": 36, "usage_type": "call"}, {"api_name": "arrus.Us4OEMCfg", "line_number": 41, "usage_type": "call"}, {"api_name": "arrus.ops.Tx", "line_number": 62, "usage_type": "call"}, {"api_name": "arrus.SineWave", "line_number": 62, "usage_type": "call"}, {"api_name": 
"arrus.RegionBasedAperture", "line_number": 64, "usage_type": "call"}, {"api_name": "arrus.ops.Rx", "line_number": 66, "usage_type": "call"}, {"api_name": "arrus.RegionBasedAperture", "line_number": 68, "usage_type": "call"}, {"api_name": "arrus.ops.TxRx", "line_number": 71, "usage_type": "call"}, {"api_name": "arrus.ops.Sequence", "line_number": 75, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 75, "usage_type": "call"}, {"api_name": "arrus.SessionCfg", "line_number": 83, "usage_type": "call"}, {"api_name": "arrus.Session", "line_number": 89, "usage_type": "call"}, {"api_name": "arrus.ops.SetHVVoltage", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "numpy.iinfo", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 128, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "424005347", "text": "from .engine import SimEngine\n\nimport logging\nl = logging.getLogger(\"angr.engines.failure\")\n\nclass SimEngineFailure(SimEngine): #pylint:disable=abstract-method\n def __init__(self, project):\n\n super(SimEngineFailure, self).__init__()\n\n self.project = project\n\n def _check(self, state, **kwargs):\n\n addr = state.se.any_int(state._ip)\n jumpkind = state.history.jumpkind\n\n if jumpkind in ('Ijk_EmFail', 'Ijk_MapFail') or jumpkind.startswith('Ijk_Sig'):\n return True\n if jumpkind == 'Ijk_NoDecode' and not self.project.is_hooked(addr):\n return True\n if jumpkind == 'Ijk_Exit':\n return True\n return False\n\n def process(self, state, **kwargs):\n\n from ..procedures import SIM_PROCEDURES\n\n addr = state.se.any_int(state._ip)\n\n if state.history.jumpkind in (\"Ijk_EmFail\", \"Ijk_MapFail\") or \"Ijk_Sig\" in state.history.jumpkind:\n raise AngrExitError(\"Cannot execute following jumpkind %s\" % state.history.jumpkind)\n\n elif state.history.jumpkind == \"Ijk_NoDecode\" and not self.project.is_hooked(addr):\n raise AngrExitError(\"IR decoding error at %#x. 
You can hook this instruction with \"\n \"a python replacement using project.hook\"\n \"(%#x, your_function, length=length_of_instruction).\" % (addr, addr))\n\n elif state.history.jumpkind == 'Ijk_Exit':\n l.debug('Execution terminated at %#x', addr)\n terminator = SIM_PROCEDURES['stubs']['PathTerminator'](project=self.project)\n peng = self.project.factory.procedure_engine\n return peng.process(state, terminator, force_addr=addr)\n\n else:\n return SimSuccessors.failure()\n\n #\n # Pickling\n #\n\n def __setstate__(self, state):\n super(SimEngineFailure, self).__setstate__(state)\n self.project = state['project']\n\n def __getstate__(self):\n s = super(SimEngineFailure, self).__getstate__()\n s['project'] = self.project\n return s\n\nfrom ..errors import AngrExitError\nfrom .successors import SimSuccessors\n", "sub_path": "angr/engines/failure.py", "file_name": "failure.py", "file_ext": "py", "file_size_in_byte": 2154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "engine.SimEngine", "line_number": 6, "usage_type": "name"}, {"api_name": "procedures.SIM_PROCEDURES", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "406128459", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.rcParams['svg.fonttype'] = 'none'\n\n# %%\n\ntsne = pd.read_table(\"../../Simulation4/rep2/tsne/tsne_pca.txt\", index_col=0)\n\nce = pd.read_table(\n \"../../../data/Simulated4/rep2/cell_effects.txt\", index_col=0\n)\n\ndata = tsne.join(ce)\n\n# %%\n\n\nfig, axs = plt.subplots(1, ce.shape[1]-1, figsize=(9, 2.5))\ncbar_ax = fig.add_axes([.9, .1, .01, .2])\n\nimport matplotlib.colors\ncmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'grays', ['#D2D2D2', '#000000']\n)\n\nfor ax, col in zip(\n axs.ravel(), ce.columns[1:]\n):\n\n plt.sca(ax)\n vals = data[col]\n if 'evf1' in col or 'evf5' in col:\n vals = vals.subtract(vals.mean()).divide(vals.std())\n vmin = -1\n vmax = 1\n else:\n vmin = 0\n vmax = 1.5\n\n sc = plt.scatter(\n x=data.tsne1, y=data.tsne2, c=vals,\n s=1, vmin=vmin, vmax=vmax, cmap=cmap,\n rasterized=True,\n )\n plt.xticks([])\n plt.yticks([])\n for sp in ax.spines.values():\n sp.set_visible(False)\n plt.title(col[-1])\n\ncb = plt.colorbar(sc, cax=cbar_ax, ticks=[-1, 1])\ncb.set_label('Cell-Effect', labelpad=10, rotation=0, size=9, verticalalignment='center')\ncbar_ax.set_yticklabels(['Low', 'High'], size=7)\n\nplt.subplots_adjust(right=0.95, left=0.05, top=0.75)\nplt.suptitle('Simulated Components')\n# plt.show()\nplt.savefig('Simulation_tSNES.svg', dpi=300)\n", "sub_path": "Transcriptomics/Figures/Simulation/plotTSNEs.py", "file_name": "plotTSNEs.py", "file_ext": "py", "file_size_in_byte": 1422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "pandas.read_table", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colors.LinearSegmentedColormap.from_list", "line_number": 24, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.colors", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.sca", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "38959138", "text": "from __future__ import print_function, division\n\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, multiply, concatenate\nfrom keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D, Lambda\nfrom keras.layers.advanced_activations import LeakyReLU,ReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nimport keras.backend as K\n\nfrom .partials import GConvNet\nfrom .encoders import build_dense_encoder\n \nclass AAE(GConvNet):\n def __init__(self,img_shape=(28,28,1),y_dim=35,z_dim=35,num_classes=10):\n self.num_classes = num_classes\n\n GConvNet.__init__(self,img_shape,y_dim,z_dim)\n \n optimizer = Adam(0.0002, 0.5)\n losses = {\n 'G':'mse',\n 'C':'categorical_crossentropy',\n }\n\n # Build and the discriminator and recognition network\n self.E, self.Q = self.build_enc_w_qnet()\n\n # Build and compile the recognition network Q\n self.Q.compile(loss=[self.mutual_info_loss],\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Build the generator\n self.G = self.build_generator()\n\n # The encoder takes images as input\n # and generates a latent embedding\n enc_input = Input(shape=self.img_shape,name='image_input')\n latent = self.E(enc_input)\n \n # The classifier (Q) attempts to label the input image using the y_dim of latent\n target_label = self.Q(enc_input)\n\n # The generator uses the entire latent representation (y and z) to reconstruct the image\n recon = self.G(latent)\n \n # For the combined model we will only train the generator\n# self.E.trainable = False\n\n\n # The combined model (stacked encoder and generator)\n self.combined = Model(enc_input, [recon, target_label])\n self.combined.compile(loss=losses,\n optimizer=optimizer)\n \n def build_enc_w_qnet(self):\n img = Input(shape=self.img_shape)\n 
img_embedding = build_dense_encoder(input_shape=self.img_shape,layers=[3000,2000,500])(img)\n # z_lat_encoding\n z_lat = Dense(self.z_dim, activation='linear',name='z_dim')(img_embedding)\n\n # y_lat_encoding\n y_lat = Dense(self.y_dim, activation='linear',name='y_dim')(img_embedding)\n \n # Q net classifier\n q_net = Dense(128, activation='relu')(y_lat)\n label = Dense(self.num_classes, activation='softmax',name='label')(q_net)\n \n # Combined Latent Representation\n latent = Concatenate(name='latent')([y_lat,z_lat])\n\n # Return encoder (Encoder) and recognition network (Q)\n return Model(img, latent,name='Encoder'), Model(img, label,name='Classifier')\n\n\n def mutual_info_loss(self, c, c_given_x):\n \"\"\"The mutual information metric we aim to minimize\"\"\"\n eps = 1e-8\n conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))\n entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))\n\n return conditional_entropy + entropy", "sub_path": "src/test_models/adversarial_duplex.py", "file_name": "adversarial_duplex.py", "file_ext": "py", "file_size_in_byte": 3192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "partials.GConvNet", "line_number": 15, "usage_type": "name"}, {"api_name": "partials.GConvNet.__init__", "line_number": 19, "usage_type": "call"}, {"api_name": "partials.GConvNet", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 59, "usage_type": "call"}, {"api_name": "encoders.build_dense_encoder", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 81, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.backend.log", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.backend.mean", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 82, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.backend.log", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "263117590", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 20:49:02 2019\n\n@author: erigara\n\"\"\"\nfrom time import sleep\nimport matplotlib.pyplot as plt\nimport trilateration\nimport math\nimport numpy as np\nimport random\nimport itertools\nfrom scipy.optimize import least_squares\n\ndef find_dist(x,y, x_, y_):\n return ((x-x_)**2+(y-y_)**2)**0.5\n\n\ndef dist_to_rssi(dist, benchmark_rssi):\n return benchmark_rssi - 20*math.log10(dist)\n \ndef rssi_to_dist(rssi, benchmark_rssi):\n n = 2\n benchmark_dist = 1\n delta_rssi = rssi - benchmark_rssi\n dist = 
benchmark_dist*10**(-delta_rssi/(10*n))\n return dist\n\n#X = np.linspace(1, 8, 100)\n#Y = abs(5*np.sin(X))\nx = random.randint(1,3)\ny = random.randint(1,5)\nX = [x for i in range(100)]\nY = [y for i in range(100)]\ncoords = [np.array([[x,],[y,]]) for x,y in ((0,0), (3,0), (0,5), (3,5))]\n\n#random.seed(0)\nfor subset in itertools.combinations(coords, 4):\n LS_RES = []\n OPT_RES = []\n SM_RES = []\n for (x,y) in zip(X, Y): \n dist =[find_dist(x,y, coord[0,0], coord[1,0]) for coord in subset]\n rssi = [dist_to_rssi(d, -40)*random.normalvariate(1, 0.01**2) for d in dist]\n noisy_dist = [rssi_to_dist(r, -40) for r in rssi]\n \n ls_res = trilateration.ls_trilateration(subset, noisy_dist).flatten()\n opt_res = trilateration.ls_trilateration(subset, noisy_dist, method=\"nlls\").flatten()\n sm_res = trilateration.ls_trilateration(subset, noisy_dist, method=\"sm\", eps=0.01, rate=1).flatten()\n LS_RES.append(ls_res)\n OPT_RES.append(opt_res)\n SM_RES.append(sm_res)\n \n LS_RES = np.array(LS_RES)\n unx_mean, uny_mean =np.mean(LS_RES, axis =0)\n ls_std = np.sqrt(np.mean(np.linalg.norm(LS_RES - (x,y), axis=1)**2))\n #plt.scatter(LS_RES[:,0], LS_RES[:,1], color = 'b')\n \n circle = plt.Circle((x,y), ls_std, color='r', fill=False)\n plt.gca().add_artist(circle)\n \n OPT_RES = np.array(OPT_RES)\n x_mean, y_mean =np.mean(OPT_RES, axis =0)\n opt_std = np.sqrt(np.mean(np.linalg.norm(OPT_RES - (x,y), axis=1)**2))\n plt.scatter(OPT_RES[:,0], OPT_RES[:,1], color = 'g')\n \n circle = plt.Circle((x,y), opt_std, color='r', fill=False)\n plt.gca().add_artist(circle)\n \n \n SM_RES = np.array(SM_RES)\n smx_mean, smy_mean =np.mean(SM_RES, axis =0)\n sm_std = np.sqrt(np.mean(np.linalg.norm(SM_RES - (x,y), axis=1)**2))\n plt.scatter(SM_RES[:,0], SM_RES[:,1], color='k')\n \n circle = plt.Circle((x,y), sm_std, color='r', fill=False)\n plt.gca().add_artist(circle)\n \n plt.scatter([x,],[y,], color='r')\n \n plt.scatter(x_mean, y_mean)\n plt.show()\n \n print(\"------------------------------------------------------------------\")\n print(\"for point ({},{}) computed unoptimized mean is ({},{}) with std {}\".format(x,y,unx_mean, uny_mean, ls_std))\n print(\"for point ({},{}) computed nlls optimized mean is ({},{}) with std {}: \".format(x,y,x_mean, y_mean, opt_std))\n print(\"for point ({},{}) computed ms optimized mean is ({},{}) with std {}\".format(x,y,smx_mean, smy_mean, sm_std))\n print(\"------------------------------------------------------------------\")\n", "sub_path": "tests/trilateration_test.py", "file_name": "trilateration_test.py", "file_ext": "py", "file_size_in_byte": 3176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "math.log10", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 40, "usage_type": "call"}, {"api_name": "random.normalvariate", "line_number": 46, "usage_type": "call"}, {"api_name": "trilateration.ls_trilateration", "line_number": 49, "usage_type": "call"}, {"api_name": "trilateration.ls_trilateration", "line_number": 50, "usage_type": "call"}, {"api_name": "trilateration.ls_trilateration", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 57, 
"usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 75, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "490501201", "text": "#!/usr/bin/python3\n\n# imports\nimport re\nimport math\nimport requests\nimport sys\nfrom subprocess import call\nimport os\n\n######################## 1) LOAD FILE #####################################\n\ndef load_pdb(pdb_id):\n \"\"\" Function outputs amino acid sequence from the PDB ID\n that is requested by the user.\n A comment will be made if the program removes any unknown amino acids\n \"\"\"\n # if removes any spaces written by the user\n pdb_id = pdb_id.replace(' ', '')\n # test if the length of the input is correct.\n if len(pdb_id) != 4:\n print('The input ', pdb_id, ' is not a PDB ID', 
'\\n')\n sys.exit(1)\n\n # imports pdb file from the PDB website\n try:\n pdb_web = requests.get('http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=PDB&compression=NO&structureId=' + pdb_id)\n outfile = open('pdb_file.pdb', 'w')\n except requests.ConnectionError as error:\n sys.stdout.write('Error when loading file from the PDB server: '\n + str(error) + '\\n')\n sys.exit(1)\n except IOError as error:\n sys.stdout.write('Cannot write file, reason:' + str(error) + '\\n')\n sys.exit(1)\n except:\n sys.stdout.write('Unknown error has occurred when downloading the PDB file from the web site')\n\n # changes the format to txt file and downloads it on the computer\n for char in pdb_web.text:\n if char == '\\n':\n outfile.write('\\n')\n else:\n outfile.write(char)\n outfile.close()\n\n # attempt to open the pdb_file\n try:\n infile = open('pdb_file.pdb', 'r')\n # if the PDB file downloaded is empty and error will be raised\n if not os.stat('pdb_file.pdb').st_size:\n sys.stdout.write('The pdb_file downloaded is empty. This may be because no PDB file exist for this PDB ID\\n')\n sys.exit(1)\n except IOError as error:\n sys.stdout.write('Cannot open file, reason:' + error.strerror + '\\n')\n sys.exit(1)\n\n # used to insure that the sequence only contains the 20 amino acids\n aa_verify = (\n 'ALA', 'ARG', 'ASN','ASP', 'CYS', 'GLU', 'GLN',\n 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE',\n 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'\n )\n re_prot_term = None\n re_aa_pdb = None\n aa_num = 0\n aa_seq = []\n stop_flag = False\n # find all Asn og Gln in sequnece\n for line in infile:\n # find the amino acids in pdb file\n re_aa_pdb = re.search('^ATOM\\s+\\d+\\s+\\w.+?\\s+(\\w+\\s\\w\\s+\\d+)\\s+.+$', line)\n re_prot_term = re.search('^TER\\s+\\d*\\s*(ASN)\\s*\\w\\s*\\d*', line)\n\n # if there is a terminal Asn on one of the chains in the\n # PDB file, then that is added to the correct position\n if re_prot_term is not None:\n aa_seq.append('ASN')\n\n if 'ENDMDL ' in line:\n stop_flag = True\n\n # splits the amino acids into [amino_acid, chain, amino acid number]\n if re_aa_pdb is not None and not stop_flag:\n aa_pdb = re_aa_pdb.group(1)\n aa_pos = aa_pdb.split()\n\n # if amino acid number is different from earlier shown and\n # the earlier number is greater than the new number.\n if aa_pos[2] != aa_num:\n aa_num = aa_pos[2]\n # checks that all amino acids are the 20 canonical amino acids\n if aa_pos[0] in aa_verify:\n aa_seq.append(aa_pos[0])\n else:\n print('amino acid ', aa_pos[0], 'at position', aa_num,\n 'is non canonical amino acid')\n aa_seq.append(aa_pos[0])\n infile.close()\n return aa_seq\n\n##################### 2) IMPORT DATA FROM DSSP ############################\n\ndef dssp_import():\n \"\"\" Function runes the DSSP program and saves the result in\n a file named dssp.txt ready to be used by function named HH_bind()\n \"\"\"\n # sends the PDB file to the DSSP server and retrieves DSSP file\n call('dssp-2.0.4 pdb_file.pdb > dssp.txt', shell = True)\n\n # opens DSSP file if it can be found\n try:\n dssp_input = open('dssp.txt', 'r')\n except IOError as error:\n sys.stdout.write(\"Cannot open file, reason:\" + error.strerror + \"\\n\")\n sys.exit(1)\n return dssp_input\n\n####################### 3) PRIMARY STRUCTURE ##############################\n\ndef prim_calc(next_aa, steric, CONS):\n \"\"\" Function calculates the half times of a asparagine or glutamine\n from the primary structure\n Use in primary_half_time()\n \"\"\"\n # make calculation of each amines half time\n half_time = 
math.log(2) / 86400 * math.exp(steric / 100 + CONS)\n # correcting for hydrolysis\n half_time_hydro = 1 / ((1 / 8000) + (1 / half_time ))\n return half_time_hydro\n\n###########################################################################\n\ndef primary_half_time(aa_seq):\n \"\"\" Function finds steric hindrance values for the amino acids that fit the\n criteria for deamination. A function is called to calculates the\n deamination half time from the primary structure.\n The asparagine and glutamine data is collected by\n this function and append to lists.\n \"\"\"\n ## dictionaries for all defined steric hindrance values for the Asn-Xxx\n ## and Gln-Xxx.\n # P,Y and W are not included for Glutamine as there is no experimental\n # data available\n steric_asn_info = {\n \t\t 'GLY':0.0, 'HIS':219.5, 'SER':262.0, 'ALA':306.8,\n 'ASP':333.7, 'THR':370.6, 'CYS':378.8, 'LYS':393.1,\n 'MET':396.5, 'GLU':401.1, 'ARG':400.7, 'PHE':411.9,\n 'TYR':425.1, 'TRP':444.0, 'LEU':466.3, 'VAL':538.5,\n 'ILE':561.1,\n 'PRO':500, 'ASN':40, 'GLN':60,\n }\n steric_gln_info = {\n 'GLY':0.0, 'HIS':350.4, 'SER':334.4, 'ALA':347.1,\n 'ASP':562.3, 'THR':305.3, 'CYS':127.6, 'LYS':353.7,\n 'MET':273.5, 'GLU':482.0, 'ARG':459.5, 'PHE':602.0,\n 'LEU':367.7, 'VAL':399.7,'ILE':379.0\n \t\t }\n sum_asn_data, sum_gln_data, asn_pos_lst = [], [], []\n count = 0\n AA_LENGTH = len(aa_seq)\n # finds all Asn and Gln in sequence and appends the amino acids on the\n # C-terminal of the Asn, Gln\n for num in range(AA_LENGTH):\n count += 1\n # every amino acids in the sequnece is defined as aa\n aa = aa_seq[num]\n # the previous amino acid is stored as previous_aa\n previous_aa = aa_seq[num-1]\n # finds asparagine residues\n if aa == 'ASN' and num < AA_LENGTH - 1:\n # finds amino acid on C-term of Asn and test conditions\n next_aa = aa_seq[num+1]\n # inputs the positions of the asparagine residues\n asn_pos_lst.append(count)\n # find steric hindrance value from dict of\n steric_asn = steric_asn_info[next_aa]\n # specific constant for asparagine deamination\n ASN_CONS = 11.863\n # calls function to make primary calculations\n half_time_hydro = prim_calc(next_aa, steric_asn, ASN_CONS)\n # appends half time to list\n sum_asn_data.append(half_time_hydro)\n\n # if the terminal amino acid is an asparagine\n # the value will be calculated\n elif num == AA_LENGTH - 1 and aa == 'ASN':\n # find steric hindrance value from dict of\n steric_asn = steric_asn_info[aa]\n # specific constant for asparagine deamination\n ASN_CONS = 11.863\n # calls function to make primary calculations\n half_time_hydro = prim_calc(aa, steric_asn, ASN_CONS)\n # appends half time to list\n sum_asn_data.append(half_time_hydro)\n\n # finds Glutamine residues\n elif (aa == 'GLN' and previous_aa != 'ASN' and previous_aa != 'GLN' and\n num < AA_LENGTH - 1):\n # finds amino acid on C-term of Gln and test conditions\n next_aa = aa_seq[num+1]\n # different conditions are present due to insufficient data\n exclusion_aa = ['PRO', 'TRP', 'TYR', 'GLN', 'ASN']\n if next_aa not in exclusion_aa:\n # find steric hindrance value from dict\n steric_gln = steric_gln_info[next_aa]\n # specific constant for glutamine deamination\n GLN_CONS = 18.311\n # calls function to make primary calculations\n half_time_hydro = prim_calc(next_aa, steric_gln, GLN_CONS)\n # appends half time to list\n sum_gln_data.append(half_time_hydro)\n return asn_pos_lst, sum_asn_data, sum_gln_data\n\n########################## 3) S-VALUE FIND ##############################\n\ndef structure_finder(structure):\n 
\"\"\" Function simplifies the DSSP output for the secondary structure.\n The function output is a string consisting of alpha helix = H,\n beta-sheet = B and coil = T.\n Function used in HH_bind()\n \"\"\"\n if structure == 'H' or structure == 'I' or structure == 'G':\n structure_point = 'H'\n # finds beta-sheet structure\n elif structure == 'B' or structure == 'E':\n structure_point = 'B'\n # finds coil structure\n elif structure == 'T' or structure == 'S' or structure == ' ':\n structure_point = 'T'\n return structure_point\n\n##################################################################################\n\ndef S8_finder(line):\n \"\"\" Function finds the S8 value.\n S8 value is the number of interactions with the Asn side chain.\n Function used by HH_bind()\n \"\"\"\n NH_bind1 = line[38:45].split( maxsplit = 0)\n NH_bind2 = line[63:67].split( maxsplit = 0)\n\n # find S8. number of H-H to the backbone of the Neighboring N\n if NH_bind1[0] != '0' or NH_bind2[0] != '0':\n S8 = 1\n else:\n S8 = 0\n return S8\n\n##################################################################################\n\ndef S7_finder(line):\n \"\"\" Function finds the S7 value from the DSSP_input.\n The S7 is the number of interactions to the NH2 on the COOH side of Asn\n Function is called in the HH_bond() function.\n \"\"\"\n # find S7. number of H-H bonds on Asn NH2 side chain\n N_chain_H_bind = line[25:33].split()\n # if 0 in list there is 1 or 0 H-H\n if '0' in N_chain_H_bind:\n # if S7 = 0 then there is no H-H to amine\n if N_chain_H_bind[0] == '0' and N_chain_H_bind[1] == '0':\n S7 = 0\n else:\n S7 = 1\n # if there is no 0 in list there is two H-H on NH2\n else:\n S7 = 2\n return S7\n\n##################################################################################\n\ndef HH_bond(dssp_input):\n \"\"\" Function reads the DSSP input file and extracts information\n to calculate S7 and S8. The function also outputs a simplification of\n the structure_data\n \"\"\"\n S7_collect, S8_collect = [], []\n previous_aa = None\n aa = None\n flag_start = False\n structure_data = ''\n amin_search = False\n for line in dssp_input:\n # finds header line\n if ' # RESIDUE AA STRUCTURE ' in line:\n flag_start = True\n # finds all lines with structural information\n elif flag_start:\n # find all the secondary structures from DSSP\n CHAIN = 11\n # amino acid defined as aa\n aa = line[CHAIN+2]\n # structure information position\n structure = line[CHAIN+5]\n # function that simplifies the DSSP output. 
The function output is\n # appended to a string\n structure_point = structure_finder(structure)\n structure_data += (structure_point)\n\n # finds all Asn residues in the sequence\n if aa == 'N':\n # calls function to find the S7 value\n S7 = S7_finder(line)\n S7_collect.append(S7)\n amin_search = True\n # special case for S8 where to asn follow each other.\n # finds all the residues that are on the COOH side\n # of a asparagine\n if previous_aa == 'N':\n # Function finds first and second N-H binding\n # positon for backbone\n S8 = S8_finder(line)\n S8_collect.append(S8)\n\n # all the C-terminal proteins that fit the criteria are\n # found downstream of Asn\n elif amin_search:\n # reset flag to false\n amin_search = False\n # Function finds first and second N-H binding\n # positon for backbone\n S8 = S8_finder(line)\n S8_collect.append(S8)\n\n # saves the previous amino acid\n previous_aa = aa\n\n if len(structure_data) != len(aa_seq):\n print('WORNING: The length of the DSSP import file is not',\n 'the same as the length of the amino acid list found in the PDB file\\n',\n 'This may lead to incorrect results', 'Control the PDB and DSSP files\\n',\n 'length data from PDB file = ', len(aa_seq),\n ' length data from DSSP = ', len(structure_data), sep='')\n return S7_collect, S8_collect, structure_data\n\n#############################################################################\n\ndef helix_S(asn_pos_lst, structure_data):\n \"\"\" Function finds the asparagine positioned in helix structures.\n All asparagine that are in a helix are given a one zero value\n on ether S1,S2 or S3.\n \"\"\"\n S1_collect, S2_collect, S3_collect = [], [], []\n # loops through the positions of the asparagine hits, to find the S1-3 values\n for element in asn_pos_lst:\n # finds the asparagines that are in a helix\n if structure_data[element] == 'H':\n\n # find the structural information of the amino acids adjacent to\n # the asparagine\n sub_string = structure_data[element-2:element+3]\n # if there are only helixes in the sub string then the asparagine is\n # placed deep inside of a helix. 
therefor S3 = 1 , S1 = 0, S2 = 0\n if 'T' in sub_string or 'B' in sub_string:\n # count the 'H' in the string before and after the hit.\n # if there can only be 2 scenarios for each of the two sub_strings\n before_element = sub_string[:2].count('H')\n after_element = sub_string[-2:].count('H')\n # special case where S1 = 1\n if sub_string[:2] == 'HT' or sub_string[:2] == 'HB':\n S1 = 1\n S2 = 0\n # special case where S2 = 1\n elif sub_string[-2:] == 'TH' or sub_string[-2:] == 'BH':\n S2 = 1\n S1 = 0\n # if a helix is flanked by other structures\n elif before_element == after_element:\n S1 = before_element\n S2 = 0\n # if there are more helix structures upstream than downstream\n elif before_element <= after_element:\n S1 = before_element + 1\n S2 = 0\n # if there are more helix structures downstream than upstream\n elif after_element < before_element:\n S2 = after_element + 1\n S1 = 0\n S1_collect.append(S1)\n S2_collect.append(S2)\n S3_collect.append(0)\n # then S3 = 1, S1=0, S2=0\n else:\n S1_collect.append(0)\n S2_collect.append(0)\n S3_collect.append(1)\n # if the asparagine is not placed in a helix then S1,S2,S3 = 0\n else:\n S1_collect.append(0)\n S2_collect.append(0)\n S3_collect.append(0)\n return S1_collect, S2_collect, S3_collect\n\n###############################################################################\n\ndef S11S12_find(strukture_string):\n \"\"\" Function finds a value for ether S11 or S12.\n A op to 5 long string is inputted for upstream or downstream of a Asn.\n Function is called in function end_chain.\n \"\"\"\n S = 0\n for char in strukture_string:\n if char == 'T':\n S += 1\n else:\n break\n return S\n\n###############################################################################\n\ndef end_chain(asn_pos_lst, structure_data):\n \"\"\" Function calculates S10-S12 from the structural data\n and from a list of Asn positions and a string of the structure_data\n \"\"\"\n S10_collect, S11_collect, S12_collect = [], [], []\n STRUCTURE_LENGTH = len(structure_data)\n # loops through all the Asn residues found\n for asn_pos in asn_pos_lst:\n # looks at the last 20 amino acids in the peptide chain\n if asn_pos >= STRUCTURE_LENGTH - 20 and structure_data[asn_pos] == 'T':\n # the structure data from the residues +3 and -3 from the Asn\n downstream3 = structure_data[asn_pos:asn_pos+3]\n upstream3 = structure_data[asn_pos:asn_pos+3]\n\n # if the structures is 3 from the end of the chain and\n # contains only coil structures (T) then S10 = 1\n if ('TTT' in downstream3 or 'TTT' in upstream3):\n S10 = 1\n S10_collect.append(S10)\n # if the chain is further than 3 from helix or beta sheet\n # then S10 = 0 structures\n else:\n S10_collect.append(0)\n # the structure data from +5 and -5 of the Asn\n downstream = structure_data[asn_pos:asn_pos+5]\n upstream_rev = structure_data[asn_pos-1:asn_pos-6:-1]\n # find S11\n # looks at the secondary structures of the Asn and exceeding -5 on N-term\n # by calling function S11S12_find\n S11 = S11S12_find(upstream_rev)\n S11_collect.append(S11)\n # find S12\n # looks at the secondary structures of the Asn and following +5 on C-term\n # by calling function S11S12_find\n S12 = S11S12_find(downstream)\n S12_collect.append(S12)\n # if the Asn is not placed at the end of the chain and not in a coil\n # then S10,S11 and S12 are 0\n else:\n S10_collect.append(0)\n S11_collect.append(0)\n S12_collect.append(0)\n return S10_collect, S11_collect, S12_collect\n\n####################### 4) DEAMIDATION CALCULATOR ##########################\n\ndef 
half_time_calculator(S1_collect, S2_collect, S3_collect, S7_collect, S8_collect, S10_collect, S11_collect, S12_collect):\n \"\"\"Function outputs a list of deamination half times for each glutamine and asparagine and\n a list of with information that can be converted to degradation\n \"\"\"\n # calculation deamidation for each Gln residues from the primary structure\n ID_lst = [1 / CD for CD in sum_gln_data]\n\n # calculates the deamination half time for all Asn in the 3D structure\n CD_lst = []\n ASN_LIST_LENGTH = len(asn_pos_lst)\n for element in range(ASN_LIST_LENGTH):\n # if S5 = 0 a division by zero error is avoided by inserting a 1 in the denominator\n\n f = 0.48 * ( (1.0) * (S1_collect[element]) + (3.1) * (S2_collect[element])\n + 10 * (S3_collect[element])\n + 0.5 * (S7_collect[element]) + 3.2 * (S8_collect[element])\n + 2.0 * (1 - S10_collect[element]) + 0.26 * (5 - S11_collect[element])\n + 0.62 * (5 - S12_collect[element])\n )\n # calculate the individual halftime(CD) of each Asn\n prim_half_time = sum_asn_data[element]\n CD = (0.01) * prim_half_time * math.exp(f)\n CD_lst.append(CD)\n\n # prepares the individual\n ID_val = 1 / CD\n ID_lst.append(ID_val)\n return ID_lst\n\n############################## MAIN SCRIPT #################################################\n\n# user defined PDB ID\npdb_id = input('Pleas write a PDB ID: ')\n# loads PDB file from the web creates a list of the amino acids in the protein\naa_seq = load_pdb(pdb_id)\n# caluclates the primary half time\n(asn_pos_lst, sum_asn_data, sum_gln_data) = primary_half_time(aa_seq)\n# sends a request to the DSSP program on the computer.\n# DSSP predicts the secondary structure this is given as output\ndssp_input = dssp_import()\n# finds the S7-S8 values and finds structal_data\nS7_collect, S8_collect, structure_data = HH_bond(dssp_input)\n# finds the S values related to helix structe\nS1_collect, S2_collect, S3_collect = helix_S(asn_pos_lst, structure_data)\n# finds the S values related to the end of the chain\nS10_collect, S11_collect, S12_collect = end_chain(asn_pos_lst, structure_data)\n\nLENGTH = len(asn_pos_lst)\nif ( len(S1_collect) != LENGTH and len(S2_collect) != LENGTH and\n len(S3_collect) != LENGTH and len(S7_collect) != LENGTH and\n len(S8_collect) != LENGTH and len(S10_collect) != LENGTH and\n len(S11_collect) != LENGTH and len(S12_collect) != LENGTH ):\n sys.stdout.write('The S-value lists are not the same length\\n')\n sys.exit(1)\n\n# calculates the prerequisites for the 3D half time of the protein\nID_lst = half_time_calculator(S1_collect, S2_collect, S3_collect, S7_collect, S8_collect, S10_collect, S11_collect, S12_collect)\n\n# the total deamidation half time of total protein:\ntry:\n # calculates the half time of the protein if ID list is not zero\n half_time_of_total_protein = 100 / sum(ID_lst)\n\nexcept ZeroDivisionError as error:\n sys.stdout.write('The peptide has no asparagine residues, therefor the half time cannot be calculated\\n')\n sys.exit(1)\n\nprint('Protein half time is estimated to ', '%.1f' % half_time_of_total_protein, 'days')\n", "sub_path": "Deaminator.py", "file_name": "Deaminator.py", "file_ext": "py", "file_size_in_byte": 21856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 29, "usage_type": "attribute"}, {"api_name": 
"sys.stdout.write", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "re.search", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 115, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 116, "usage_type": "call"}, {"api_name": "math.log", "line_number": 127, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 127, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 479, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 510, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 510, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 511, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 522, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 522, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 523, "usage_type": "call"}]} +{"seq_id": "517462349", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 23 18:32:30 2021\n\n@author: amadeu\n\"\"\"\n\n\n######## data preprocessing #####\nimport torch\nfrom collections import Counter\nimport numpy as np\nfrom numpy import array\nfrom torch.utils.data import Dataset,DataLoader\n\nfrom numpy import array\nfrom numpy import argmax\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom sklearn.model_selection import train_test_split\n\nimport math\n\n# data_file = 'data/traffic.txt'\n# seq_size, batch_size, K = 15, 2, 2\n\n \ndef data_preprocessing(data_file, seq_size, batch_size, K):\n \n def open_data(data_file):\n with open(data_file, 'r') as f:\n text = f.read()\n \n text = text.split(\"\\n\") # there are 862 timesteps after each \\n \n # so we have 17544 elements, and each of these elements have 862 time steps\n text = text[:-1]\n \n all_samples = []\n for sample in text: # each samples consists of 862 time steps\n sample = sample.split(\",\")\n sample = np.array(sample)\n sample = sample.astype(float)\n\n all_samples.append(sample)\n \n \n return all_samples\n\n\n def create_sequences(all_samples, seq_size, K): \n x = list()\n y = list()\n \n #data_len = len(all_samples)\n #train_len = math.floor(0.6*data_len)\n #valid_len = math.floor(0.2*data_len)\n #test_len = 
math.floor(0.2*data_len)\n \n for sample in all_samples: # change here\n \n for i in range(len(sample)):\n \n # i=0\n # seq_size=10\n idx = i + seq_size #sequence end\n \n if (idx+K) > len(sample)-1: \n break\n \n # add K positions to label to predict the K next timesteps\n feat_seq, target_seq = sample[i:idx], sample[idx:(idx+K)] # target labels for CNN\n x.append(feat_seq)\n y.append(target_seq)\n \n \n return array(x), array(y)\n\n\n\n class get_data(Dataset):\n def __init__(self,feature,target):\n self.feature = feature\n self.target = target\n def __len__(self):\n return len(self.feature)\n def __getitem__(self,idx):\n item = self.feature[idx]\n label = self.target[idx]\n return item,label\n \n all_samples = open_data(data_file)#/home/scheppacha/data/trainset.txt')\n \n x, y = create_sequences(all_samples, seq_size, K)\n \n rest_feat, test_feat, rest_targ, test_targ = train_test_split(\n x, y, test_size=0.1) # 10%\n \n train_feat, valid_feat, train_targ, valid_targ = train_test_split(\n rest_feat, rest_targ, test_size=0.222) # 10% in paper\n \n train = get_data(train_feat, train_targ)# \n valid = get_data(valid_feat, valid_targ)\n test = get_data(test_feat, test_targ)\n\n \n train_loader = torch.utils.data.DataLoader(train, batch_size, shuffle=True)# shuffle ensures random choices of the sequences\n valid_loader = torch.utils.data.DataLoader(valid, batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test, batch_size, shuffle=False)\n\n \n return train_loader, valid_loader, test_loader", "sub_path": "models/data_preprocessing.py", "file_name": "data_preprocessing.py", "file_ext": "py", "file_size_in_byte": 3394, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 82, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "163526406", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 2 20:34:21 2020\n\n@author: AKASH DIXIT\n\"\"\"\n\nimport http.client, urllib.request, urllib.parse, urllib.error\nimport os\nimport cv2\nimport requests\nimport json\nimport time\nfor i in range(0, 2):\n cam=cv2.VideoCapture(0)\n \n subscription_key = 'xxxxxxxxxxxxxxxxxxxxxxxxx'\n ret,img=cam.read()\n cv2.imwrite(\"capturePerson/image\"+\".jpg\",img)\n cv2.waitKey(100)\n cv2.imshow(\"Face\",img)\n image_path = \"C:/Users/AKASH DIXIT/Documents/GitHub/Microsoft_Cognitive_Services_Face_API/capturePerson/image.jpg\"\n image_data = open(image_path, \"rb\").read()\n \n headers = {\n # Request headers\n 'Content-Type': 'application/octet-stream',\n 'Ocp-Apim-Subscription-Key': subscription_key,\n }\n \n params = urllib.parse.urlencode({\n # Request parameters\n 'returnFaceId': 
'true',\n 'returnFaceLandmarks': 'false',\n 'returnFaceAttributes': 'Age,emotion',\n \n 'recognitionModel': 'recognition_01',\n 'returnRecognitionModel': 'false',\n 'detectionModel': 'detection_01',\n })\n \n #try:\n #conn = http.client.HTTPSConnection('centralindia.api.cognitive.microsoft.com')\n #conn.request(\"POST\", \"/face/v1.0/detect?%s\" % params, headers=headers, data=image_data)\n face_recognition_url = \"https://centralindia.api.cognitive.microsoft.com/face/v1.0/detect?%s\" % params\n response = requests.post(face_recognition_url, headers=headers, data=image_data, params=params)\n #response = conn.getresponse()\n p = response.json()\n print(p)\n d = p[0]['faceId']\n for a in p:\n emo = a['faceAttributes']['emotion']\n value_max = max(emo.keys(), key=(lambda v: emo[v]))\n key_max = max(emo.keys(), key=lambda k: emo[k])\n# print(value_max)\n\n print('Maximum Value: ',emo[value_max])\n print('Maximum key: ',key_max)\n #print(d)\n \n headers1 = { \n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': subscription_key\n }\n body1={\n \"personGroupId\": \"friends\",\n \"faceIds\": [ \n d]\n \n }\n params1 = urllib.parse.urlencode({\n })\n \n try:\n conn = http.client.HTTPSConnection('centralindia.api.cognitive.microsoft.com')\n conn.request(\"POST\", \"/face/v1.0/identify?%s\" % params1, str(body1), headers1)\n response = conn.getresponse()\n data = response.read()\n #print(data)\n d= json.loads(data)\n #print(type(d))\n s= d[0]['candidates'][0]['personId']\n \n #print(s)\n if s == \"7cc20792-2367-4ad2-b046-2073478bc5d7\":\n print(\"shefali\")\n else:\n print(\"NOT FOUND\")\n \n conn.close()\n except Exception as e:\n print(\"[Errno {0}] {1}\".format(e.errno, e.strerror))\n \n os.remove(\"C:/Users/AKASH DIXIT/Documents/GitHub/Microsoft_Cognitive_Services_Face_API/capturePerson/image.jpg\")\n i=i+1\n time.sleep(4)\ncam.release()\ncv2.destroyAllWindows()\n#if p == \"7cc20792-2367-4ad2-b046-2073478bc5d7\"", "sub_path": "face_and_emotion_recognition.py", "file_name": "face_and_emotion_recognition.py", "file_ext": "py", "file_size_in_byte": 3102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request.parse.urlencode", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 31, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 31, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 46, "usage_type": "call"}, {"api_name": "urllib.request.parse.urlencode", "line_number": 72, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 72, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 72, "usage_type": "name"}, {"api_name": "http.client.client.HTTPSConnection", "line_number": 76, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 76, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 76, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 81, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": 
"cv2.destroyAllWindows", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "309241406", "text": "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\"\"\"\nCommon data processing utilities that are used in a\ntypical object detection data pipeline.\n\"\"\"\nimport numpy as np\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom PIL import Image, ImageOps\n\n\n# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601\n_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]\n_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]\n\n\ndef convert_PIL_to_numpy(image, format):\n \"\"\"\n Convert PIL image to numpy array of target format.\n\n Args:\n image (PIL.Image): a PIL image\n format (str): the format of output image\n\n Returns:\n (np.ndarray): also see `read_image`\n \"\"\"\n if format is not None:\n # PIL only supports RGB, so convert to RGB and flip channels over below\n conversion_format = format\n if format in [\"BGR\", \"YUV-BT.601\"]:\n conversion_format = \"RGB\"\n image = image.convert(conversion_format)\n image = np.asarray(image)\n # PIL squeezes out the channel dimension for \"L\", so make it HWC\n if format == \"L\":\n image = np.expand_dims(image, -1)\n\n # handle formats not supported by PIL\n elif format == \"BGR\":\n # flip channels if needed\n image = image[:, :, ::-1]\n elif format == \"YUV-BT.601\":\n image = image / 255.0\n image = np.dot(image, np.array(_M_RGB2YUV).T)\n\n return image\n\n\ndef convert_image_to_rgb(image, format):\n \"\"\"\n Convert an image from given format to RGB.\n\n Args:\n image (np.ndarray or Tensor): an HWC image\n format (str): the format of input image, also see `read_image`\n\n Returns:\n (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8\n \"\"\"\n if isinstance(image, torch.Tensor):\n image = image.cpu().numpy()\n if format == \"BGR\":\n image = image[:, :, [2, 1, 0]]\n elif format == \"YUV-BT.601\":\n image = np.dot(image, np.array(_M_YUV2RGB).T)\n image = image * 255.0\n else:\n if format == \"L\":\n image = image[:, :, 0]\n image = image.astype(np.uint8)\n image = np.asarray(Image.fromarray(image, mode=format).convert(\"RGB\"))\n return image\n\n\ndef read_image(file_name, format=None):\n \"\"\"\n Read an image into the given format.\n Will apply rotation and flipping if the image has such exif information.\n\n Args:\n file_name (str): image file path\n format (str): one of the supported image modes in PIL, or \"BGR\" or \"YUV-BT.601\"\n\n Returns:\n image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for\n supported image modes in PIL or \"BGR\"; float (0-1 for Y) for YUV-BT.601.\n \"\"\"\n with PathManager.open(file_name, \"rb\") as f:\n image = Image.open(f)\n # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973\n try:\n image = ImageOps.exif_transpose(image)\n except Exception:\n raise FileNotFoundError\n\n return convert_PIL_to_numpy(image, format)\n", "sub_path": "projects/SOD/demo/detection_utils.py", "file_name": "detection_utils.py", "file_ext": "py", "file_size_in_byte": 3169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 47, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 74, "usage_type": "name"}, {"api_name": "fvcore.common.file_io.PathManager.open", "line_number": 91, "usage_type": "call"}, {"api_name": "fvcore.common.file_io.PathManager", "line_number": 91, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 92, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 92, "usage_type": "name"}, {"api_name": "PIL.ImageOps.exif_transpose", "line_number": 95, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "525035841", "text": "import cv2\nimport cv\nimport caffe\nimport numpy as np\nimport ast\nimport datetime\nimport time\n\nclass ImshowLayer(caffe.Layer):\n def setup(self, bottom, top):\n assert len(top) == 0, 'ImshowLayer has no output.'\n self.param_ = ast.literal_eval(self.param_str)\n if 'resize' not in self.param_ or self.param_['resize'] == 0:\n self.resize = False\n else:\n self.resize = True\n self.size = self.param_['resize']\n self.save = self.param_.get('save', None)\n self.scale = self.param_.get('scale', [])\n self.format = self.param_.get('format', [])\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n batch_size = bottom[0].num\n height = 0\n width = 0\n if self.resize:\n width = self.size * len(bottom)\n height = self.size\n else: \n for i in xrange(len(bottom)):\n width += bottom[i].width\n height = max(height, bottom[i].height)\n buff = np.zeros((height*batch_size, width, 3), dtype = np.uint8)\n #import pdb \n #pdb.set_trace()\n for i in xrange(batch_size):\n cur = 0\n for j in xrange(len(bottom)):\n img = bottom[j].data[i].transpose((1,2,0))\n if len(self.scale):\n assert len(self.scale) == len(bottom)\n img *= self.scale[j]\n img = img.astype(np.uint8)\n if len(self.format):\n assert len(self.format) == len(bottom)\n if self.format[j] == 'ycrcb':\n img = cv2.cvtColor(img, cv.CV_YCrCb2BGR)\n if img.shape[2] == 1:\n img = np.tile(img, 3)\n if self.resize:\n widthj = heightj = self.size\n img = cv2.resize(img, (self.size, self.size))\n else:\n widthj = bottom[j].width\n heightj = bottom[j].height\n buff[i*height:i*height+heightj, cur:cur+widthj, :] = img\n cur += widthj\n if self.save is None:\n cv2.imshow('buff', buff)\n cv2.waitKey(0)\n else:\n cv2.imwrite(self.save+'%f'%time.time()+'.jpg', buff)\n\n def backward(self, top, propagate_down, bottom):\n pass", "sub_path": "python/caffe_util/imshow_layer.py", "file_name": "imshow_layer.py", "file_ext": "py", "file_size_in_byte": 2381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "caffe.Layer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "ast.literal_eval", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 50, 
"usage_type": "call"}, {"api_name": "cv.CV_YCrCb2BGR", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 65, "usage_type": "call"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "149929026", "text": "import json\nimport wikipedia\nimport hashlib\nfrom os import path\nfrom IteratorsGeneratorsYield.logger.loggerDecor import logger_path\n\n\nPATH_TO_FILE = path.abspath(path.join(path.dirname(__file__), 'output.txt'))\n\n\nclass MyIter:\n\n def __init__(self, description):\n self.description = description\n self.start_index = -1\n\n @property\n def name_country(self):\n try:\n return self.description[self.start_index]['name']['common']\n except wikipedia.exceptions.DisambiguationError:\n return ''\n\n def write_file(self):\n try:\n response = wikipedia.page(self.name_country)\n except wikipedia.exceptions.DisambiguationError:\n response = wikipedia.page(f\"{self.name_country} (country)\")\n with open(PATH_TO_FILE, 'a', encoding='utf8') as file:\n file.write(f'{response.title}: {response.url}\\n')\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.start_index += 1\n if self.start_index != len(self.description):\n self.write_file()\n return self.name_country\n else:\n raise StopIteration\n\n\n@logger_path('logsFolder')\ndef hash_md5(self):\n hash_string = hashlib.md5()\n with open(self, encoding='utf8') as file:\n for string_data in file:\n hash_string.update(string_data.encode())\n yield hash_string.hexdigest()\n\n\nif __name__ == '__main__':\n with open('countries.json', encoding='utf8') as f:\n data = json.load(f)\n for item in MyIter(data):\n print(item)\n for item in hash_md5(PATH_TO_FILE):\n print(item)\n", "sub_path": "iter.py", "file_name": "iter.py", "file_ext": "py", "file_size_in_byte": 1636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "wikipedia.exceptions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "wikipedia.page", "line_number": 26, "usage_type": "call"}, {"api_name": "wikipedia.exceptions", "line_number": 27, "usage_type": "attribute"}, {"api_name": "wikipedia.page", "line_number": 28, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 46, "usage_type": "call"}, {"api_name": "IteratorsGeneratorsYield.logger.loggerDecor.logger_path", "line_number": 44, "usage_type": "call"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "273407630", "text": "#! 
/usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\ndef main():\n import sys, optparse, os, math, shlex, json, re, subprocess, glob\n\n usage = \"\"\"INPUT | varyAndDecline.py [options] COMBO > OUTPUT\n\n Pipes data through variant generation and prints results to stdout, and lots of info to stderr\n An LM must be specified with one of:\n --lang abc-DEF (finds latest nightly or weekly build for abc-DEF)\n --lm /path/to/product (uses LM in product dir)\n --variant_type must be specified\n For a single-column input with no field name, --field_name must be specified\n \n Typical inputs are merged-records datalists with priors, or column 7 (\"content\") of userdata TSV.\n\n Priors in input are ignored and replaced with exponentially decaying priors which are normalized but floored so they are all >0.\n (This is not ideal because it assigns more probability mass to entries for which more outputs are generated. Better ideas welcome.)\n\n All the following are equivalent valid inputs:\n Jingle Bells (with --variant_type music --field_name s)\n {\"s\":\"Jingle Bells\"} (with --variant_type music)\n s \"Jingle Bells\" (with --variant_type music)\n prior=0.0002032 Jingle Bells (with --variant_type music --field_name s)\n fn John (with --variant_type contacts --field_name fn)\n fn John ln Smith (with --variant_type contacts)\n {\"fn\":\"John\", \"ln\":\"Smith\"} (with --variant_type contacts)\n\n JSON dict literals (as above) can be typed on the command line but double quotes may need to be escaped, or the entire dict wrapped in single quotes.\n \n Variant types, field names, and platforms are defined in token_s.enh (TknTemplate) and may vary by combo.\n\n Most common variant types: \"contacts\", \"music\"\n Sample field names for variant type \"contacts\": \"fn\", \"mn\", \"ln\", \"fnln\", \"title\", \"ph\", \"fr\", \"fnp\", etc\n Sample field names for variant type \"music\": \"s\" (song title), \"al\" (album title), \"ar\" (artist name)\n \"\"\"\n\n parser = optparse.OptionParser(usage)\n parser.add_option('--lang', '-l', action='store', help='use latest nightly or weekly build for this combo')\n parser.add_option('--lm', '-m', action='store', help='LM build to use (product dir)')\n parser.add_option('--variant_type', '-v', action='store', help='variant type')\n parser.add_option('--platform', action='store', default='default', help='platform for platform-specific variant generation')\n parser.add_option('--field_name', '-f', action='store', help='field name to use if data contains only one field')\n parser.add_option('--token_s', '-t', action='store', help='token_s.enh to use (must be compatible with LM)')\n parser.add_option('--exp_decay', action='store', default=0.001, help='decay rate for priors (default %default)')\n parser.add_option('--min_prior', action='store', default=1e-16, help='floor for priors (default %default)')\n parser.add_option('--param_config', '-p', action='store', help='Tokenizer param config to use (defaults to VariantGenerationLME for variant type \"contacts\", and VariantGeneration for all others')\n parser.add_option('--tp_build', action='store', help='path to textproc release e.g. 
/lm/releases/textproc/14.02.000.01046 (defaults to version in LM textproc dir)')\n\n (options, args) = parser.parse_args(sys.argv[1:])\n\n if len(args) == 0 and not options.lang and not options.lm:\n sys.stderr.write(\"Either --lang or --lm must be specified\\n\\n\")\n parser.print_help()\n sys.exit()\n if len(args) == 1 and not (options.lang or options.lm):\n sys.stderr.write('lang-country code as last arg is still supported for legacy reasons but deprecated in favor of \"--lang abc-DEF\"\\n')\n options.lang = args[0]\n\n # get prod dir to use\n if options.lm:\n prod = os.path.abspath(options.lm)\n if not os.path.exists(\"%s/textproc/bin/token\" % prod): sys.stderr.write(\"No textproc found in %s; need a product dir\\n\" % prod); sys.exit(1)\n else:\n nightly_builds = glob.glob(\"/lm/build/nightly/test/rootdir/lm/build/official/mobi/%s/search/large/*/product/mainline/dispatch.voc\" % options.lang)\n weekly_builds = glob.glob(\"/lm/build/weekly/rootdir/lm/build/official/mobi/%s/search/large/*/product/mainline/dispatch.voc\" % options.lang)\n if len(nightly_builds) > 0: prod = os.path.dirname(max(nightly_builds))\n elif len(weekly_builds) > 0: prod = os.path.dirname(max(weekly_builds))\n else:\n sys.stderr.write(\"No mobi LM build found for combo %s. Passing inputs through to output with no processing.\\n\" % options.lang)\n sys.stdout.write(sys.stdin.read())\n\n sys.stderr.write(\"Using LM at %s\\n\" % prod)\n\n if not options.variant_type: sys.stderr.write(\"No variant type specified\\n\"); sys.exit(1)\n\n if options.param_config == None:\n options.param_config = 'VariantGenerationLME' if options.variant_type == 'contacts' else 'VariantGeneration'\n \n # get TP build to use\n if options.tp_build == None:\n # hacky way to get TP version for voc\n p = subprocess.Popen(\"%s/textproc/bin/token\" % prod, stdout=subprocess.PIPE, env={'LD_LIBRARY_PATH': \"%s/textproc/bin\" % prod})\n tp_version = re.search(r'version ([0-9\\.]+)', p.communicate()[0].splitlines()[0]).group(1)\n options.tp_build = \"/lm/releases/textproc/%s\" % tp_version\n \n # gotta use python 2.6 with TP releases <14.02.000.01046, hence this atrocious hack\n if int(options.tp_build.split('/')[-1].replace('.', '')) < 140200001046 and sys.version_info >= (2, 7):\n subprocess.call(['/usr/bin/python2.6', '-x'] + sys.argv)\n sys.exit()\n\n if 'LD_LIBRARY_PATH' in os.environ:\n sys.stderr.write(\"LD_LIBRARY_PATH is set to %s and may affect TP pywrapper loading\\n\" % os.environ['LD_LIBRARY_PATH'])\n\n sys.stderr.write(\"Using TP at %s with param config %s\\n\" % (options.tp_build, options.param_config))\n sys.stderr.write(\"-----\\n\")\n \n # so all the TP and MREC logging does not go to stdout\n real_stdout = sys.stdout\n sys.stdout = sys.stderr\n \n # load TP python wrapper correctly depending on version\n if sys.version_info >= (2, 7): # \"normal\" loading for current TP releases\n sys.path += ['%s/export/ilglr.env/lib64/python2.7/site-packages' % options.tp_build]\n import textproc.mrecutil as mu\n import textproc.tp_run_util as tp\n else: # legacy loading - must be using a TP <14.02.000.01046\n sys.path += ['%s/export/ilglr/pysdapi.gz' % options.tp_build]\n sys.path += ['%s/export/ilglr/pywrapper.gz' % options.tp_build]\n sys.path += ['%s/alien/python_libs' % options.tp_build]\n sys.path += ['%s/alien/python_libs/networkx.zip' % options.tp_build]\n import mrecutil as mu\n import tp_run_util as tp\n \n tp.initTP()\n hVoc = mu.openVoc(prod, vocName='dispatch.voc', svcName='dispatch.svc')\n sParamSetSpec = mu.openParamSet(prod)\n\n 
hLexicon = tp.TPLexiconHandle(tp.TPLexiconHandle_None)\n tp.TPLexicon_Open(tp.byref(tp.TPDataSpec(1, os.path.join(prod, 'lexicon.enh'))),\n tp.IHANDLE(hVoc),\n tp.IHANDLE(sParamSetSpec[0].hParamSetHolder),\n tp.IHANDLE(sParamSetSpec[0].hLMScoringParamSet),\n None,\n tp.byref(hLexicon))\n\n tokenizer_data = options.token_s if options.token_s else os.path.join(prod, 'token_s.enh')\n if options.token_s: sys.stderr.write(\"Using %s\\n\" % tokenizer_data)\n hTokenizer = tp.TPTokenHandle(tp.NONE)\n tp.TPToken_Open(None,\n tp.byref(tp.TPDataSpec(1, tokenizer_data)),\n hLexicon,\n tp.byref(hTokenizer))\n\n tokenInfo = tp.TPTokenInfo()\n tp.TPToken_GetInfo(hTokenizer, tp.byref(tokenInfo))\n hParamConfig = tp.TPParamConfigHandle()\n tp.TPParam_GetConfigHandle(tp.IHANDLE(tokenInfo.hParamManager), options.param_config, tp.byref(hParamConfig))\n\n num_in = 0\n outputs = []\n has_priors = None\n unnorm_priors = []\n z = 0.0\n\n for line in sys.stdin:\n sys.stderr.write('*******************\\n')\n line = line.strip()\n if len(line) == 0: continue\n\n if has_priors == None: has_priors = line.startswith('prior=')\n\n if has_priors:\n fields = line.split()\n prior = fields[0]\n data = ' '.join(fields[1:])\n else:\n data = line\n \n if options.field_name:\n data_dict = dict([(options.field_name, data.decode('utf-8'))])\n else:\n if data.startswith(\"{\") and '\"' not in data:\n # hack to allow JSON dict literals on command line\n data = \"{\" + re.sub(r'([^\\s:]+): ([^\\s,]+)', r'\"\\1\": \"\\2\"', data[1:-1]) + \"}\"\n try:\n data_dict = json.loads(data)\n except ValueError:\n tokens = shlex.split(data)\n if len(tokens) % 2 != 0: sys.stderr.write(\"Bad input format\\n\"); sys.exit(1)\n data_dict = dict([(tokens[i], tokens[i+1]) for i in range(0, len(tokens), 2)])\n \n dict_str = str(data_dict).encode('utf-8')\n sys.stderr.write(\"original: %s\\n\" % dict_str)\n pDataInfo = (tp.TPTokenUserData * len(data_dict))()\n for (i, d) in enumerate(data_dict.items()):\n (pDataInfo[i].pszField, pDataInfo[i].pszValue) = (d[0], d[1].encode('utf-8'))\n num_in += 1\n\n # this is to find out how many outputs and how long a category string we need to allocate space for. 
hardcoding to save cycles\n nNeeded = tp.c_uint(32)\n nNeededCategory = tp.c_uint(32)\n # nNeeded = tp.c_uint(0)\n # nNeededCategory = tp.c_uint(0)\n # tp.TPToken_VaryAndDecline(hTokenizer, hParamConfig,\n # str(options.variant_type), str(options.platform),\n # pDataInfo, len(pDataInfo),\n # None, None, 0, tp.byref(nNeeded),\n # None, 0, tp.byref(nNeededCategory))\n # print nNeeded.value, nNeededCategory.value\n\n hResultArray = (tp.TPTokenResultHandle * nNeeded.value)()\n hFlagArray = (tp.TPFlags * nNeeded.value)()\n pszCategory = (tp.c_char * nNeededCategory.value)()\n tp.TPToken_VaryAndDecline(hTokenizer, hParamConfig,\n str(options.variant_type), str(options.platform),\n pDataInfo, len(pDataInfo),\n hResultArray, hFlagArray, nNeeded.value, None,\n pszCategory, nNeededCategory.value, None)\n sys.stderr.write(\"category: %s\\n\" % pszCategory.value)\n\n # sort/uniq/assign prior over ALL paths rather than just over paths within each result\n all_paths = set()\n\n for i in range(nNeeded.value):\n if not hResultArray[i]: break\n paths = []\n tp.TGraph(hTokenizer, hResultArray[i], \"\").df(paths)\n\n # now uniq-ing all results and paths into one list, so we lose flags which are per-result\n # sys.stderr.write(\"flags: %d\\n\" % hFlagArray[i])\n # need python wrapper access to decFlagToString if we want to decode flags (morph info, etc)\n\n for p in paths:\n if len(p) == 0: continue\n if type(p[0]) == str: all_paths.add(' '.join(q for q in p))\n if type(p[0]) == tuple: all_paths.add(' '.join(q[0] for q in p))\n\n tp.TPToken_DeleteResult(hTokenizer, hResultArray[i])\n\n # sort by length so shortest (first) path gets highest prior\n for p in sorted(list(all_paths), key=lambda x: len(''.join(x).decode('utf-8'))):\n outputs.append(p)\n if has_priors:\n unnorm_prior = math.exp(-1.0 * float(options.exp_decay) * len(outputs))\n unnorm_priors.append(unnorm_prior)\n z += unnorm_prior\n sys.stderr.write(\"output: %s\\n\" % p)\n\n for i in range(len(outputs)):\n if has_priors:\n prior = max(unnorm_priors[i]/z, float(options.min_prior))\n real_stdout.write(\"prior=%.16f\\t\" % prior)\n real_stdout.write(\"%s\\n\" % outputs[i])\n\n tp.TPToken_Close(hTokenizer)\n tp.TPLexicon_Close(hLexicon)\n mu.closeParamSet(sParamSetSpec)\n mu.closeVoc(hVoc)\n\n avg = len(outputs)/float(num_in) if num_in > 0 else 0.0\n sys.stderr.write(\"%d inputs\\n%d outputs\\navg %.2f outputs per input\\n\" % (num_in, len(outputs), avg))\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tools/scripts/varyAndDecline.py", "file_name": "varyAndDecline.py", "file_ext": "py", "file_size_in_byte": 12743, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "optparse.OptionParser", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": 
"sys.stderr.write", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 66, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.stdin.read", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 84, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 89, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 91, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 93, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 96, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 96, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 104, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tp_run_util.initTP", "line_number": 116, "usage_type": "call"}, {"api_name": "mrecutil.openVoc", "line_number": 117, "usage_type": "call"}, {"api_name": "mrecutil.openParamSet", "line_number": 118, "usage_type": "call"}, {"api_name": "tp_run_util.TPLexiconHandle", "line_number": 120, "usage_type": "call"}, {"api_name": 
"tp_run_util.TPLexiconHandle_None", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPLexicon_Open", "line_number": 121, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 121, "usage_type": "call"}, {"api_name": "tp_run_util.TPDataSpec", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tp_run_util.IHANDLE", "line_number": 122, "usage_type": "call"}, {"api_name": "tp_run_util.IHANDLE", "line_number": 123, "usage_type": "call"}, {"api_name": "tp_run_util.IHANDLE", "line_number": 124, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 129, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPTokenHandle", "line_number": 130, "usage_type": "call"}, {"api_name": "tp_run_util.NONE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPToken_Open", "line_number": 131, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 132, "usage_type": "call"}, {"api_name": "tp_run_util.TPDataSpec", "line_number": 132, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 134, "usage_type": "call"}, {"api_name": "tp_run_util.TPTokenInfo", "line_number": 136, "usage_type": "call"}, {"api_name": "tp_run_util.TPToken_GetInfo", "line_number": 137, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 137, "usage_type": "call"}, {"api_name": "tp_run_util.TPParamConfigHandle", "line_number": 138, "usage_type": "call"}, {"api_name": "tp_run_util.TPParam_GetConfigHandle", "line_number": 139, "usage_type": "call"}, {"api_name": "tp_run_util.IHANDLE", "line_number": 139, "usage_type": "call"}, {"api_name": "tp_run_util.byref", "line_number": 139, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 148, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 148, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 166, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 170, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 171, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 171, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 175, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPTokenUserData", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tp_run_util.c_uint", "line_number": 182, "usage_type": "call"}, {"api_name": "tp_run_util.c_uint", "line_number": 183, "usage_type": "call"}, {"api_name": "tp_run_util.TPTokenResultHandle", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPFlags", "line_number": 194, "usage_type": "attribute"}, {"api_name": "tp_run_util.c_char", "line_number": 195, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPToken_VaryAndDecline", "line_number": 196, "usage_type": 
"call"}, {"api_name": "sys.stderr.write", "line_number": 201, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 201, "usage_type": "attribute"}, {"api_name": "tp_run_util.TGraph", "line_number": 209, "usage_type": "call"}, {"api_name": "tp_run_util.TPToken_DeleteResult", "line_number": 220, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 226, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 229, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tp_run_util.TPToken_Close", "line_number": 237, "usage_type": "call"}, {"api_name": "tp_run_util.TPLexicon_Close", "line_number": 238, "usage_type": "call"}, {"api_name": "mrecutil.closeParamSet", "line_number": 239, "usage_type": "call"}, {"api_name": "mrecutil.closeVoc", "line_number": 240, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 243, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 243, "usage_type": "attribute"}]} +{"seq_id": "200010446", "text": "\"\"\"\nFYP: Killing wilding pines with helicopter sprayed herbicide\nHelicopter spray model\n\"\"\"\n\n# Import Libraries\nimport math as m\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport time\nimport scipy.sparse as sps\n\n# Constants\nrho_a = 1 # kg/m^3 - constant air density\nk_constant = 7 # constant relating helicopter roll-up to fixed-wing roll-up\nrho_liquid = 1000 # kg/m^3 - constant spray density\nmew_a = 1.789e-5 # kg/m/s - constant air viscocity\ng = np.array([-9.81, 0, 0]) # m/s^2 - gravity acceleration - [z, y, x]\nV_min = 9e-7 # m^3 per m^2 - minimum spray volume required\n\n# Inputs\ninp = {\n 'R': 5, # m - raidus of helicopter propeller\n 'c': 0.25, # m - blade chord length\n 'U_inf': 10.0, # m/s - helicopter speed\n 'V_drop': -10.0, # m/s - droplet initial speed\n 'Omega': 405 * 2 * m.pi / 60, # rad/s - rotor rotational speed\n 'H': 30.0, # m - height of helicopter\n 'W': 1134 * 9.81, # N - weight of helicopter and everything it is carrying, max downwash weight\n 'q_squared': 0.1, # square root of mean squared turbulence.\n 'dry_bulb_temp': 12, # degrees C\n 'RH': 50 # Reletive humidity %\n}\n\ninp_weather = { # Need to get this from somewhere.\n 'Vx': 0, # m/s\n 'Vy': 0, # m/s\n 'Vz': 0 # m/s\n}\n\n# Tree inputs (20x20 grid. 
1m per entry):\ntree_data = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\nheli_position = (19, 15) # (columns from left, rows from top) coordinate position in tree matrix\nheli_direction = 45 # Angle (degrees), anticlockwise from ^ direction\ntree_spacing = 1 # (m)\n\n# Boom Inputs:\nboom_length = 0.8 * 2 * inp['R'] # (m)\nn_nozzles = 10\nboom_height = 2 # (m) - height distance from boom to rotor blades\nboom_offset = 0.5 # (m) - distance of boom from helicopter blade centre in positive x direction\n\n# For each Accu-Flo 0.28 nozzle\nnumber_drops = 500\ndroplet_min = 800e-6 # 800 microns\ndroplet_max = 1000e-6 # 1000 microns\n# http://www.bishopequipment.com/technicalinformation.html\n# Manufacturer specifies droplet sizes of 800-1000 microns\n# Log normal will incorporate outliers\ndroplet_avg = 1110e-6 # (VMD) (updated Friday 08th with Volume Based Estimates\nsigma = 0.15 # Standard deviation (updated Friday 08th)\ncone_angle = 20 # Accu-Flo 0.28 has very narrow cone range (20 deg est)\nnozzle_flowrate = 2e-5 # m^3/s\n\n# Sparse Domain:\nnr = 3 # Can change the multiplier on the radius to choose domain size\ns_domain = [(-nr * inp['R'], nr * inp['R']), (1e-12, inp['H'])]\n\n# Numerical Values:\ninp_num = {\n 'n_base': 5, # Dividing streamline sections and number of vortices\n 'nt': 1000 # Max time iterations\n}\n\n# To make velocity field work\nvortex_factor = 0.1\n\n# Complete Domain:\ndef calc_required_domain(n, dx, nz):\n \"\"\"Calculates required domain size based on helicopter and wind speeds.\"\"\"\n\n # Calculate time estimate:\n SF = 3\n t = SF * (-inp_weather['Vz'] - np.sqrt(inp_weather['Vz']**2 - 2 * g[0] * (s_domain[1][1] - s_domain[1][0]))) / g[0]\n\n # Calculate extra distance\n Lx = int((inp_weather['Vx'] - inp['U_inf']) * t / dx) * dx # Need to make sure Ls are multiples of ds\n Ly = int(inp_weather['Vy'] * t / dx) * dx\n\n # Calculate extra nodes required\n nx = int(abs(Lx / dx))\n ny = int(abs(Ly / dx))\n\n # Construct new domain\n domain = [[s_domain[0][0], s_domain[0][1]], # [start_x], [stop_x]\n [s_domain[0][0], s_domain[0][1]]] # [start_y], [stop_y]\n if Lx > 0:\n domain[0][1] = domain[0][1] + Lx\n else:\n domain[0][0] = domain[0][0] + Lx\n\n if Ly > 0:\n domain[1][1] = domain[1][1] + Ly\n else:\n domain[1][0] = domain[1][0] + Ly\n\n # Construct new 
coordinates\n x, y, z = np.meshgrid(np.linspace(domain[0][0], domain[0][1], n + nx),\n np.linspace(domain[1][0], domain[1][1], n + ny),\n np.linspace(s_domain[1][0], s_domain[1][1], nz))\n\n return x, y, z, domain, nx, ny\n\n# Field Positions\ndef get_coordinates(n):\n \"\"\" Make grid of size nxnxnz for velocity field \"\"\"\n \n # Get x and y direction vector and dx\n x = np.linspace(s_domain[0][0], s_domain[0][1], n)\n dx = abs(x[1] - x[0])\n \n # Calculate appropriate nz, to give same/similar spacing\n nz = int(np.ceil((s_domain[1][1] - s_domain[1][0]) / dx + 1))\n\n # Get smaller domain grid for calculating vortices\n x, y, z = np.meshgrid(x, x, np.linspace(s_domain[1][0], s_domain[1][1], nz))\n\n return [x, y, z, dx, nz]\n\n# Velocity Field Functions\ndef get_downwash_speed_matrixes(w, x, y, z, n):\n \"\"\" Calculate velocities in grid \"\"\"\n # Calculating Constant:\n const = w / (2 * inp['H'])\n\n # Calculating Velocities\n Vx = const * np.sqrt(x**2 + y**2) * np.cos(np.arctan2(y, x))\n Vy = const * np.sqrt(x**2 + y**2) * np.sin(np.arctan2(y, x))\n Vz = -w * z / inp['H']\n\n return [Vx, Vy, Vz]\n\n# Vortex Funcitons\ndef cart2pol(X, Y, Z, r_ds, current_z):\n # horizontal angle in 3d spherical coords\n φ = np.arctan2(Y, X)\n # coordinate of vortex ring line in same plane as point\n xr = r_ds * np.cos(φ)\n yr = r_ds * np.sin(φ)\n zr = current_z\n # coordinate of point from corresponding point on ring\n x = X - xr\n y = Y - yr\n z = Z - zr\n # radial location from corresponding point on ring\n ρ = np.sqrt((x)**2 + (y)**2 + (z)**2)\n # vertical angle from corresponding point on ring\n θ = np.arctan2((z), np.sqrt((x)**2 + (y)**2))\n return(ρ, θ, φ, xr, yr, zr)\n\ndef get_vortex_speed_matrixes(Γ, ρc, X, Y, Z, xr, yr, zr, ρ, θ, φ):\n 'Calculate velocities in grid'\n # velocity of air in polar coordinates (Lamb-Oseen Vortex model)\n Vr = 0\n Vθ = -Γ / (2 * m.pi *ρ) * (1 - np.exp(-ρ**2 / ρc**2))\n Vφ = 0\n # convert velocities of air in to cartesian coordinates\n Vx = Vθ* np.sin(θ) * np.cos(φ)\n Vy = Vθ* np.sin(θ) * np.sin(φ)\n Vz = Vθ* np.cos(θ)\n # #loop thought points with i j k coordinates\n for k in range(len(Z)):\n for j in range(len(Y)):\n for i in range(len(X)):\n if (X[i][j][k]**2 + Y[i][j][k]**2) > (xr[i][j][k]**2 + yr[i][j][k]**2):\n Vz[i][j][k] = -1 * Vz[i][j][k]\n return [Vx, Vy, Vz]\n\n# Streamline function:\ndef get_dividing_streamline(n, Vx, Vy, Vz):\n \"\"\" Get dividing streamline coordinates. \"\"\"\n n = int(n / nr)\n\n # Initialize z\n z = [0.] 
* n\n\n # X, Y 2D coordinate matrixes\n r = np.linspace(inp['R'], s_domain[0][1], n) # Need this to work out z\n Vr = np.sqrt(Vx[1, :, 1]**2 + Vy[:, 1, 1]**2)\n R, P = np.meshgrid(r, np.linspace(0, 2 * np.pi, n))\n dividing_streamline_x, dividing_streamline_y = R * np.cos(P), R * np.sin(P)\n\n # Top:\n z[0] = inp['H']\n\n # Iterate:\n for i in range(0, n - 1):\n j = int((nr - 1) * n + i)\n z[i + 1] = z[i] + ((s_domain[0][1] - inp['R']) / (n - 1)) * Vz[1, 1, -i - 1] / Vr[j]\n if z[i + 1] < 0:\n z[i + 1] = 0\n\n # Make z matrix\n dividing_streamline_z = np.outer(np.ones(n), z)\n\n return [dividing_streamline_x, dividing_streamline_y, dividing_streamline_z]\n\n# Plotting Functions:\ndef plot_velocity_field_and_dividing_streamline(x, y, z, Vx, Vy, Vz, ds_x, ds_y, ds_z):\n \"\"\"Plot the vector field of the air with the dividing streamline.\"\"\"\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.quiver(x, y, z, Vx, Vy, Vz, length=0.5, normalize=True, arrow_length_ratio=0.1)\n ax.plot_wireframe(ds_x, ds_y, ds_z, cmap=plt.cm.YlGnBu_r)\n ax.set_xlabel('x (m)')\n ax.set_ylabel('y (m)')\n ax.set_zlabel('z (m)')\n plt.title('Velocity Field')\n\ndef plot_air_velocity_slice(plotH, n, x, y, z, Vx, Vy, Vz):\n \"\"\"Plot an air velocity contour and vector slice at a certain height.\"\"\"\n # Plotting a Contour at a certain height (horizontal velocities only)\n plt.figure()\n ploti = int(plotH * n / inp['H'])\n plt.streamplot(x[:, :, ploti], y[:, :, ploti], Vx[:, :, ploti], Vy[:, :, ploti])\n\n # Plotting velocity vectors only at that height\n fig2 = plt.figure()\n ax2 = fig2.gca(projection='3d')\n ax2.quiver(x[:, :, ploti], y[:, :, ploti], z[:, :, ploti], Vx[:, :, ploti], Vy[:, :, ploti], Vz[:, :, ploti])\n\n# Air Velocities Function:\ndef get_velocity_field(n):\n \"\"\"Takes in downwash, vortex and weather velocities, as well as dividing \n streamline positions and the coordinates grid. 
Returns air velocities.\"\"\"\n\n # Get field coordinates:\n x, y, z, dx, nz = get_coordinates(n)\n\n # Get Steady State Downwash Velocities:\n ss_w = m.sqrt(inp['W'] / (2 * m.pi * rho_a * (inp['R']**2)))\n Vwx, Vwy, Vwz = get_downwash_speed_matrixes(ss_w, x, y, z, n)\n\n # Get Dividing Streamling Coordinates:\n ds_x, ds_y, ds_z = get_dividing_streamline(n, Vwx, Vwy, Vwz)\n\n # Calculate constants:\n # mew = inp['U_inf'] / (inp['Omega'] * inp['R']) # Helicopter forward advance\n #sigma_s = 2 * inp['c'] / (m.pi * inp['R']) # Helicopter Blade Solidity - Using equation Illia gave\n Gamma0 = inp['W'] / (2 * rho_a * inp['R'] * inp['U_inf']) # Circulation constant\n\n # Initialise air velocities for small domain\n Vx_s = x * 0\n Vy_s = y * 0\n Vz_s = z * 0\n\n # Put together velocity field with dividing streamline:\n F = 1\n dz = z[0, 0, 1]\n for ds_i in range(0, int(n / nr) - 1):\n \"\"\"Since the node number in the dividing streamline are 1/nr the nodes in\n the regular velocities, each dividing streamline node is used for \n multiple velocity nodes in the z direction.\"\"\"\n\n # Work out indicies for dz:\n start_i = (np.abs(z[0, 0, :] - ds_z[0, ds_i])).argmin()\n stop_i = (np.abs(z[0, 0, :] - ds_z[0, ds_i + 1])).argmin()\n r_ds = ds_x[0, ds_i + 1]\n\n for k in range(start_i, stop_i, -1):\n k = max(k, 0)\n\n # Calculate F for height:\n F = np.exp(-(inp['H'] - z[0, 0, k]) / inp['R'])\n\n # Calculate new vortex velocity:\n r_ds = ds_x[0, ds_i] + (z[0, 0, k] - ds_z[0, ds_i]) * (ds_x[0, ds_i + 1] - ds_x[0, ds_i]) / (ds_z[0, ds_i + 1] - ds_z[0, ds_i]) # Using linear interpolation\n ρc = r_ds - inp['R']\n ρ, θ, φ, xr, yr, zr = cart2pol(x, y, z, r_ds, z[0, 0, k]) # local polar coordinated of each vortex\n Gamma = Gamma0 * F * vortex_factor\n Vvx, Vvy, Vvz = get_vortex_speed_matrixes(Gamma, ρc, x, y, z, xr, yr, zr, ρ, θ, φ)\n Vx_s = Vx_s + Vvx\n Vy_s = Vy_s + Vvy\n Vz_s = Vz_s + Vvz\n \n for i in range(0, n):\n for j in range(0, n):\n r_coord = np.sqrt(x[i, j, k]**2 + y[i, j, k]**2)\n if r_coord < r_ds:\n Vx_s[i, j, k] = Vx_s[i, j, k] + np.sqrt(F) * Vwx[i, j, k]\n Vy_s[i, j, k] = Vy_s[i, j, k] + np.sqrt(F) * Vwy[i, j, k]\n Vz_s[i, j, k] = Vz_s[i, j, k] + np.sqrt(F) * Vwz[i, j, k]\n\n\n\n\n # Plot Velocity Field and Dividing Streamline\n # plot_velocity_field_and_dividing_streamline(x, y, z, Vx_s, Vy_s, Vz_s, ds_x, ds_y, ds_z)\n\n # Plot Velocity Slice at Certain Height:\n plotH=10 # m\n # plot_air_velocity_slice(plotH, n, x, y, z, Vx_s, Vy_s, Vz_s)\n\n # Construct Larger Domain:\n X, Y, Z, domain, nx, ny=calc_required_domain(n, dx, nz)\n\n # Initialise air velocities for large domain and add Weather\n Vx=X * 0 + inp_weather['Vx']\n Vy=Y * 0 + inp_weather['Vy']\n Vz=Z * 0 + inp_weather['Vz']\n\n # With the helicopter flying in the positive x-direction:\n Vx=Vx - inp['U_inf']\n\n # Place small velocity field in the large one:\n xi=0\n yi=0\n if domain[0][0] < s_domain[0][0]:\n yi=nx\n if domain[1][0] < s_domain[0][0]:\n xi=ny\n Vx[xi:xi + n, yi:yi + n, :] += Vx_s\n Vy[xi:xi + n, yi:yi + n, :] += Vy_s\n Vz[xi:xi + n, yi:yi + n, :] += Vz_s\n\n # Plot Total Velocity Field:\n # plot_air_velocity_slice(plotH, n, X, Y, Z, Vx, Vy, Vz)\n return Vx, Vy, Vz, domain, (X, Y, Z)\n\n\n# Droplet Tracking Functions:\ndef calc_reynolds(V_slip, D_drop):\n \"\"\"Calculate Droplet Reynold's Number in 3D\"\"\"\n\n Re = (rho_a * D_drop * V_slip) / mew_a\n return Re\n\ndef calc_drag(Re):\n \"\"\"Calculate Droplet Drag Coefficient in 3D\"\"\"\n # Langmuir and Blodgett 1964\n Cd = (24 / Re) * (1 + (0.197 * Re**0.63) + (2.6e-4 
* Re**1.38))\n\n # Clift, Grace and Weber 1978\n #Cd = (24 / Re) * (1 + (0.15 * Re**0.687)) + 0.42 / (1 + 4.25e-4 * (Re**-1.16))\n return Cd\n\ndef calc_evaporation_model(Dia, Re, time, dt):\n # Function that returns the new diameter of droplet\n # Dia is the diameter of the droplet in metres\n # Re is the Reynolds number of the droplet\n\n # Calculate the wet bulb temperature\n a = 611.21 # Pa\n b = 18.678\n c = 257.14 # deg C\n d = 234.5 # deg C\n psm = a * np.exp((b - inp['dry_bulb_temp'] / d) * (inp['dry_bulb_temp'] / (c + inp['dry_bulb_temp'])))\n gammam = np.log((inp['RH'] / 100) * np.exp((b - inp['dry_bulb_temp'] / d) * (inp['dry_bulb_temp'] / (c + inp['dry_bulb_temp']))))\n dewPointTemp = c * gammam / (b - gammam)\n wetBulbTemp = (2 / 3) * inp['dry_bulb_temp'] + (1 / 3) * dewPointTemp\n \n # bulbTemp is the wet bulb temperature depression in degrees Celsius\n bulbTemp = inp['dry_bulb_temp'] - wetBulbTemp\n\n # Calculate Beta:\n beta = 84.76 * (1 + 0.27 * np.sqrt(Re)) * 1e-12\n\n # Calculate Droplet Life:\n tl = Dia**2 / (beta * bulbTemp)\n\n # Calculate New Diameter\n should_break = False\n if time < tl:\n Dia = Dia * (np.sqrt(1 - dt / tl))\n else:\n should_break = True\n\n return Dia, should_break\n\ndef calc_relaxation_time(D, Cd, V_slip):\n \"\"\"Calculates droplet relaxation time. Teske 1989 Eq 8.\"\"\"\n Tp = (4 * D * rho_liquid) / (3 * Cd * rho_a * abs(V_slip))\n return Tp\n\ndef calc_travel_time(V_slip):\n \"\"\"Calculates droplet time of travel. Teske 1989 Eq 16.\"\"\"\n Tt = inp['R'] / (abs(V_slip) + (3 / 8) * np.sqrt(inp['q_squared']))\n return Tt\n\ndef calc_K(T_ratio):\n \"\"\"Calculate K function. T_ratio is Tp/Tt. Teske 1989 Eq 17.\"\"\"\n K = 0.5 * ((3 - T_ratio**2) * (1 - T_ratio) + (T_ratio)**2 - 1) / (1 - T_ratio**2)**2\n return K\n\ndef calc_variances(D, V_slip, variances, dt):\n \"\"\"Calculates the variances in one direction with previous variances \n inputted as a tuple (xx0, xv0, vv0).Returns new tuple (xx, xv, vv).\"\"\"\n # Accounting for zero slip velocities:\n Re = np.array([0., 0., 0.])\n Cd = np.array([0., 0., 0.])\n Tp = np.array([0., 0., 0.])\n for di in range(3):\n if V_slip[di] != 0:\n # Calculating new drag and Reynolds\n Re[di] = calc_reynolds(V_slip[di], D)\n Cd[di] = calc_drag(Re[di]) # Possible that V_slip could be zero and result in infinite drag, causing Tp to be zero\n\n # Calculate relacation time, travel time and K\n Tp[di] = calc_relaxation_time(D, Cd[di], V_slip[di])\n\n Tt = calc_travel_time(V_slip)\n K = calc_K(Tp / Tt)\n\n # Initialize variances\n xx0, xv0, vv0 = variances\n\n # Calculate new variances\n xu = inp['q_squared'] * (-Tp * K + Tt / 2) / 3 # Teske 1989 Eq 14\n uv = inp['q_squared'] * K / 3 # Teske 1989 Eq 15\n vv = (2 * dt * uv + Tp * vv0) / (Tp + 2 * dt) # Discretisation of Teske 1989 Eq 13\n xv = (dt * xu + Tp * xv0 + Tp * dt * vv) / (Tp + dt) # Discretisation of Teske 1989 Eq 12\n xx = 2 * dt * xv + xx0 # Discretisation of Teske 1989 Eq 11\n\n return (xx, xv, vv)\n\ndef binormal_distribution(x_mean, y_mean, x_var, y_var, x_array, y_array):\n \"\"\"Calculates the binormal distribution for domain.\"\"\"\n fx = (1 / np.sqrt(x_var * 2 * np.pi)) * np.exp(-(x_array - x_mean)**2 / (2 * x_var))\n fy = (1 / np.sqrt(y_var * 2 * np.pi)) * np.exp(-(y_array - y_mean)**2 / (2 * y_var))\n f = fx * fy\n return f\n\ndef update_droplet(V_drop, L_drop, D_drop, variances, i, Va, current_time, domain):\n \"\"\"update droplet with next time step.\"\"\"\n # Initialising next row:\n V_drop = np.vstack((V_drop, np.array([0., 0., 0.])))\n L_drop 
= np.vstack((L_drop, np.array([0., 0., 0.])))\n D_drop = D_drop + [0.]\n\n Vax = Va[0]\n \n # Calculate timestep:\n inv_z_transit_time = np.abs((V_drop[i - 1, 0] * Vax.shape[2]) / (s_domain[1][1] - s_domain[1][0])) # 1/characteristic time to transit 1 cell in the z direction\n inv_y_transit_time = np.abs((V_drop[i - 1, 1] * Vax.shape[0]) / (domain[1][1] - domain[1][0])) # 1/characteristic time to transit 1 cell in the y direction\n inv_x_transit_time = np.abs((V_drop[i - 1, 2] * Vax.shape[1]) / (domain[0][1] - domain[0][0])) # 1/characteristic time to transit 1 cell in the x direction\n inv_cell_transit_time = np.amax([inv_z_transit_time, inv_y_transit_time, inv_x_transit_time])\n inverse_times = [inv_cell_transit_time]\n dt = 1 / np.amax(inverse_times)\n\n # Finding Current Position Index:\n iy = int((L_drop[i - 1, 2] - domain[0][0]) * (Vax.shape[1] - 1) / (domain[0][1] - domain[0][0]))\n ix = int((L_drop[i - 1, 1] - domain[1][0]) * (Vax.shape[0] - 1) / (domain[1][1] - domain[1][0]))\n iz = int(L_drop[i - 1, 0] * (Vax.shape[2] - 1) / (s_domain[1][1] - s_domain[1][0]))\n\n # Reynolds Calculation in 3D coords\n Vxa, Vya, Vza = Va\n V_air = np.array([Vza[ix, iy, iz], Vya[ix, iy, iz], Vxa[ix, iy, iz]])\n # V_air = [-5, 0, 0.1] # Constant air testing _________________________________ ENABLED\n V_slip = abs(V_drop[i - 1, :] - V_air)\n Re = calc_reynolds(V_slip, D_drop[i - 1])\n\n # If statements to catch zero slip velocity components:\n directions = np.array([0., 0., 0.])\n Cd = np.array([0., 0., 0.])\n Fd = np.array([0., 0., 0.])\n mass = np.array([0., 0., 0.])\n Ad = np.array([0., 0., 0.])\n for di in range(3):\n if V_slip[di] != 0:\n # Direction of air slip\n directions[di] = -V_slip[di] / (V_drop[i - 1, di] - V_air[di])\n\n # Drag Calculation in 3D coords\n Cd[di] = calc_drag(Re[di])\n\n # Drag Force Calculations\n Fd[di] = Cd[di] * (np.pi * (D_drop[i - 1] / 2)**2) * (rho_a / 2) * V_slip[di]**2\n mass[di] = (1 / 6) * (np.pi * (D_drop[i - 1]**3)) * rho_liquid\n Ad[di] = (Fd[di] / mass[di]) * directions[di]\n\n # Calculate change in diameter with time:\n overall_Re = np.sqrt(Re[0]**2 + Re[1]**2 + Re[2]**2)\n D_drop[i], should_break = calc_evaporation_model(D_drop[i - 1], overall_Re, current_time, dt)\n\n # Update droplet velocity and position and diameter\n V_drop[i, :] = V_drop[i - 1, :] + g * dt + Ad * dt\n L_drop[i, :] = L_drop[i - 1, :] + V_drop[i - 1, :] * dt\n\n # Update Variances\n new_V_slip = abs(V_drop[i, :] - V_air)\n variances = calc_variances(D_drop[i], new_V_slip,\n variances, dt)\n\n # Update Time\n current_time = current_time + dt\n\n return V_drop, L_drop, D_drop, variances, should_break, current_time\n\ndef testing_droplet_outcome(L_drop, D_drop, xx, x_array, y_array, volume_matrix, should_break_initial):\n \"\"\"Test if droplet has been deposited, evaporated, or exited domain.\"\"\"\n should_break = False\n\n # Testing if droplet has been deposited\n if L_drop[0] <= 0:\n V_deposition = (1 / 6) * (np.pi * D_drop ** 3) * 1e6 # Volume depositied (ml)\n\n #x_index = int((L_drop[i, 2]) * n / (6 * inp['R']) + n / 2)\n # y_index = int((L_drop[i, 1]) * n / (6 * inp['R']) + n / 2) # n/2 to shift to middle?\n #volume_matrix[x_index, y_index] += V_deposition\n V_array = V_deposition * binormal_distribution(L_drop[2], L_drop[1], xx[2], xx[1], x_array, y_array)\n volume_matrix += V_array\n should_break = True\n\n # Testing if droplet has escaped\n has_escaped = (L_drop[2] < x_array[0, 0]) or (L_drop[2] > x_array[0, -1])\n has_escaped = has_escaped or (L_drop[1] < y_array[0, 0]) or 
(L_drop[1] > y_array[-1, 0])\n if has_escaped: # This just testing if escaped domain not dividing streamline\n print(f\"Escaped at %s\" % L_drop)\n should_break = True\n\n # Testing if droplet has completely evaporated\n if D_drop <= 0 or should_break_initial:\n print(f\"Droplet evaporated at %s: \" % L_drop)\n should_break = True\n\n return volume_matrix, should_break\n\ndef droplet_plots(number_drops, droplet_paths, x, y, volume_matrix):\n \"\"\"Plot droplet tracking plots\"\"\"\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n # ax = fig.add_subplot(111, projection='3d')\n # Axes3D.plot(L_drop[:,2], L_drop[:,1], L_drop[:,0])\n ax.set_xlim([x[0], x[-1]])\n ax.set_ylim([y[0], y[-1]])\n ax.set_zlim([0, inp['H']])\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_title('Trajectory of droplets')\n for j in range(0, number_drops):\n max_index = np.max(np.where(droplet_paths[j, :, :])) + 1\n ax.plot3D(droplet_paths[j, 0:max_index, 2], droplet_paths[j, 0:max_index, 1], droplet_paths[j, 0:max_index, 0])\n plt.show(block=False)\n\n fig = plt.figure() # Plot mass deposited on ground\n plt.contourf(x, y, volume_matrix)\n plt.colorbar()\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.show(block=False)\n\ndef droplet_tracking(nt, domain, positions, Va, nozzle_position):\n \"\"\"\n Single droplet tracker (takes in info on nozzle position, \n droplet size and initial velocity, air velocity matrixes drag coeff and evaporation \n and tracks droplet trajectory)\n \n Should sample droplet size & velocity from distribution\n Compute Re and Cd then advance droplet along trajectory\n Compute evaporation then update mass\n \n Advance time until deposition or escape\n \n Grab new sample until enough spray has been sampled\n \"\"\"\n\n # Initial Value Inputs:\n droplet_paths = np.zeros((number_drops, nt, 3))\n max_i = np.zeros(number_drops)\n\n # Accu-Flo 0.28 Parameters\n # http://www.bishopequipment.com/technicalinformation.html\n # Manufacturer specifies droplet sizes of 800-1000 microns\n # Log normal will incorporate outliers\n\n # Uniform Distribution with these values (testing)\n # droplet_size = np.random.uniform(droplet_min, droplet_max)\n\n droplet_size = np.random.lognormal(np.log(droplet_avg), sigma)\n\n x_array = positions[0][:, :, 0]\n y_array = positions[1][:, :, 0]\n volume_matrix = x_array * 0.0\n\n # Will need to get this info from somewhere\n inp_droplet = {\n 'V_init': np.array([inp['V_drop'], 0., 0.]), # m/s - droplet initial speed\n 'L_init': np.array([nozzle_position[2], nozzle_position[1], nozzle_position[0]]), # m - nozzle location\n 'D_init': droplet_size, # m - droplet diameter\n }\n\n for j in range(0, number_drops): # Loop over all droplets\n\n # Velocity\n theta = (np.pi / 180) * np.random.uniform(0, cone_angle / 2) # Determine droplet direction: sample an angular range within the nozzle cone angle\n phi = (np.pi / 180) * np.random.uniform(0, 360) # Determine droplet direction: sample a 360 degree range\n x_comp = inp['V_drop'] * np.sin(theta) * np.sin(phi) # x cpt of droplet velocity\n y_comp = inp['V_drop'] * np.sin(theta) * np.cos(phi) # y cpt of droplet velocity\n inp_droplet['V_init'] = np.array([inp['V_drop'], y_comp, x_comp])\n\n # Initialising\n V_drop = inp_droplet['V_init'] # Initial Droplet Velocity (z, y, x) - m/s\n L_drop = inp_droplet['L_init'] # Initial Droplet start position (z, y, x) - m\n # D_drop = [inp_droplet['D_init']] # Initial Droplet Diameter - m\n D_drop = [np.random.lognormal(np.log(droplet_avg), sigma)] # Initial Droplet Diameter - 
m\n xx = np.array([0., 0., 0.]) # Initialising Position Variance (z, y, x) - m\n xv = np.array([0., 0., 0.]) # Initial Variance\n vv = np.array([0., 0., 0.]) # Initial Variance\n variances = (xx, xv, vv)\n\n # Iterate with time:\n i = 1\n current_t = 0\n while i < nt:\n # Update Droplet\n V_drop, L_drop, D_drop, variances, should_break, current_t = update_droplet(V_drop, L_drop, D_drop, variances, i, Va, current_t, domain)\n\n # Paths\n droplet_paths[j, i - 1, :] = L_drop[i - 1, :]\n\n # Update position variance\n xx = np.vstack((xx, np.array([0., 0., 0.])))\n xx[i, :] = variances[0]\n\n # Testing droplet outcome\n volume_matrix, should_break = testing_droplet_outcome(L_drop[i, :],\n D_drop[i],\n xx[i, :],\n x_array,\n y_array,\n volume_matrix,\n should_break)\n if should_break:\n break\n\n i = i + 1\n\n # Did it break\n if i == nt:\n print(\"Didn't break. Ran out of time steps.\")\n\n # Plotting\n x = positions[0][0, :, 0]\n y = positions[1][:, 0, 0]\n #droplet_plots(number_drops, droplet_paths, x, y, volume_matrix)\n\n # Mean Droplet Path:\n tot_drop = np.sum(droplet_paths, 0)\n num_zeros = np.count_nonzero(droplet_paths, 0)\n mean_path = tot_drop / num_zeros\n\n return x, y, volume_matrix, mean_path\n\ndef process_tree_data():\n \"\"\"\n - Calculate tree vector distances\n - Maybe: Construct new tree position matrix, with tree positions changed to \n velocity reference frame (needs to be rotated by angle)?\n - Might not need this so hold off\n - Output:\n - Maybe: New tree matrix in velocity field reference frame?\n - A list/vector of tupples with x and y distances from helicopter in velocity field reference frame\n - e.g. [(2, 3),(5, 2),(1, 1),(6, 2)]\n - So that in each tupple: \n (distance tree is to left/right of helicopter, distance until helicopter in line with tree)\n \"\"\"\n\n # Create coordinate arrays for only trees\n S = sps.find(tree_data) # create sparse matrix containing only tree locations\n xt = S[1][:]\n yt = S[0][:]\n\n tree_distance = tree_spacing * np.sqrt((xt - heli_position[0])**2 + (heli_position[1] - yt)**2) # find absolude displacement from helicopter to trees\n old_tree_angle = np.arctan2(((heli_position[1] - yt)), ((xt - heli_position[0]))) # find angle between tree and ground domain reference frame\n new_tree_angle = old_tree_angle - heli_direction # find angle between\n a = tree_distance * np.cos(new_tree_angle) # horizontal distance to tree in helicopter reference frame\n b = tree_distance * np.sin(new_tree_angle) # vertical distance to tree in helicopter reference frame\n\t\n # map two lists into a single list of tuples\n tree_coord = list(zip(a, b))\n\n return tree_coord\n\ndef plot_spray(x, y, spray):\n \"\"\"Plot spray deposited in domain.\"\"\" \n fig = plt.figure() # Plot mass deposited on ground\n plt.contourf(x, y, spray)\n plt.colorbar()\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.show(block=False) \n\ndef spray_distances(dx, volume_matrix, domain):\n \"\"\" Calculates the distance from the helicopter and width of spray. \"\"\"\n\n # Find positions and locations where volume above 1e-7:\n x_indices, y_indices = np.where(volume_matrix > 1e-7)\n x_locations = y_indices * dx + domain[0][0]\n y_locations = x_indices * dx + domain[1][0]\n \n # Find Distances:\n x_span = (min(x_locations), max(x_locations))\n y_span = (min(y_locations), max(y_locations))\n\n return x_span, y_span\n\ndef spray_outcome(time, on_off_times, dt, volume_matrix, trees):\n \"\"\"\n Simulate the true volume from nozzle and where it \n all landed. 
Compares to tree matrix.\n \"\"\"\n volume_matrix = np.hstack((volume_matrix, volume_matrix * 0.)) # Resize matrix to add more space at front\n spray_matrix = volume_matrix * 0.\n count = 0\n spray_time = 0\n for i in range(volume_matrix.shape[1]):\n if (time > on_off_times[count][0]) and (time < on_off_times[count][1]):\n spray_matrix[:, i:] += volume_matrix[:, :volume_matrix.shape[1] - i]\n elif (time > on_off_times[count][1]):\n spray_time += on_off_times[count][1] - on_off_times[count][0]\n count += 1\n if count == len(on_off_times):\n break\n time += dt\n # Multiply to correct volume based on nozzle flowrate:\n n = (nozzle_flowrate * spray_time) / np.sum(spray_matrix)\n spray_matrix = n * spray_matrix\n \n return spray_matrix\n\ndef nozzle_simulation(nt, positions, domain, V_field, trees, nozzle, start_time):\n \"\"\"\n - Inputs:\n - Droplet tracking inputs\n - Nozzle position (m) as tupple (x, y, z) in velocity field reference frame\n - Tree positions (m) as list of tupples [(distance to side of heli, distance until reach tree), (x, y)]\n - Run droplet tracking once\n - Calculate spray distances and spray span in both x and y directions using mean and variance info or volume matrix\n - Calculate start stop times - using helicopter forward flight and current time\n - Decide if nozzle should turn on - using info on how far to side tree is and minimum volume required\n - Outputs:\n - List of ordered start/stop nozzle times as tuples: \n - e.g. [[start time 1, stop time 1], [start time 2, stop time 2], [start time 3, stop time 3]]\n \"\"\"\n \n # Run Droplet Tracking:\n x, y, volume_matrix, mean_path = droplet_tracking(nt, domain, positions, V_field, nozzle)\n\n # Get spray location:\n dx = abs(x[0] - x[1])\n x_span, y_span = spray_distances(dx, volume_matrix, domain)\n\n # Calculate spray times for each tree:\n times = []\n for i in range(len(trees)):\n d = np.array([trees[i][1] - x_span[1], trees[i][1] - x_span[0]]) # [start_d, stop_d]\n\n # Will spray reach tree?\n if (trees[i][0] > y_span[0]) and (trees[i][0] < y_span[1]):\n times += [start_time + d / inp['U_inf']]\n times = np.sort(np.array(times), 0)\n\n # Streamline spray times so they do not overlap:\n try:\n on_off_times = [times[0]]\n count = 0\n for i in range(len(times)):\n if times[i, 0] > on_off_times[count][1]:\n count += 1\n on_off_times += [times[i]]\n elif times[i, 1] > on_off_times[count][1]:\n on_off_times[count][1] = times[i, 1]\n on_off_times = np.array(on_off_times)\n except IndexError:\n on_off_times = [[False]]\n\n # Find spray on trees:\n dt = dx / inp['U_inf']\n spray = np.hstack((volume_matrix * 0, volume_matrix * 0))\n if on_off_times[0][0] is not False:\n spray = spray_outcome(start_time, on_off_times, dt, volume_matrix, trees)\n\n return on_off_times, mean_path, volume_matrix, spray\n\ndef get_nozzle_positions():\n \"\"\"Use boom inputs to work out a list of nozzle positions as (x, y, z) tupples.\"\"\"\n nozzle_positions = [(0.0)] * n_nozzles\n d_between = boom_length / (n_nozzles - 1)\n for i in range(n_nozzles):\n nozzle_positions[i] = (boom_offset, i * d_between - boom_length / 2, inp['H'] - boom_height)\n return nozzle_positions\n\ndef plot_mean_droplet_paths(Vy, Vz, domain, positions, n, droplet_paths):\n \"\"\" Plots the air velocity field and the \n droplet paths in 2D to match AGDISP.\"\"\"\n plt.figure()\n\n # Plot Velocity Field\n plt.quiver(positions[1][:, -int(n / 2), :], positions[2][:, -int(n / 2), :], Vy[:, -int(n / 2), :], Vz[:, -int(n / 2), :], headwidth=2, headlength=3)\n plt.xlabel('y 
(m)')\n plt.ylabel('z (m)')\n plt.title('Droplet Path and Velocity Field')\n plt.xlim(domain[1][0], domain[1][1])\n plt.ylim(s_domain[1][0], s_domain[1][1])\n\n # Plot mean droplet path for each nozzle:\n for droplet in droplet_paths:\n max_n = np.where(droplet[:, 0] > 0)[0][-1]\n plt.plot(droplet[:max_n, 1], droplet[:max_n, 0])\n \n plt.show(block=False) \n\ndef main():\n \"\"\" Main function \"\"\"\n # Get time of code begining:\n start_time = time.time()\n\n # Model Step Sizes:\n n_base = inp_num['n_base'] # Dividing streamline sections and number of vortices\n n = 2 * nr * n_base # Number of nodes in space - must be even to avoid dividing by zero, also a multiple of nr\n # Same in every direction.\n nt = inp_num['nt'] # unitless - number of timesteps\n\n # Air Velocity Field\n Vx, Vy, Vz, domain, positions = get_velocity_field(n)\n \n # Get Nozzle Times:\n trees = process_tree_data()\n nozzle_positions = get_nozzle_positions() # (x, y, z) in metres\n nozzle_times = [0] * n_nozzles\n droplet_paths = [0] * n_nozzles\n count = 0\n volume_matrix = positions[0][:, :, 0] * 0.0\n spray_matrix = positions[0][:, :, 0] * 0.0\n spray_matrix = np.hstack((spray_matrix, spray_matrix))\n for nozzle in nozzle_positions:\n nozzle_times[count], droplet_paths[count], spray, spraid = nozzle_simulation(nt, positions, domain, (Vx, Vy, Vz), trees, nozzle, start_time)\n volume_matrix += spray\n spray_matrix += spraid\n print('Nozzle ' + str(count + 1))\n print(nozzle_times[count])\n count += 1\n\n # Plot mean droplet paths with velocity field in 2D:\n plot_mean_droplet_paths(Vy, Vz, domain, positions, n, droplet_paths)\n x = positions[0][0, :, 0]\n y = positions[1][:, 0, 0]\n #plot_spray(x, y, volume_matrix)\n x = np.hstack((x, x + (x[-1] - x[0]) + (x[1] - x[0])))\n plot_spray(x, y, spray_matrix)\n for tree in trees:\n xi = abs(y - tree[0]).argmin()\n yi = abs(x - tree[1]).argmin()\n if spray_matrix[xi, yi] > V_min * tree_spacing:\n # Foliage recieved enough herbicide\n plt.plot(tree[1], tree[0], 'g.')\n else:\n # Foliage did not recieved enough herbicide\n plt.plot(tree[1], tree[0], 'rx')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "spray_model/helicopter_spray_model.py", "file_name": "helicopter_spray_model.py", "file_ext": "py", "file_size_in_byte": 35754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 158, 
"usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 178, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 234, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.streamplot", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 261, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 261, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.sqrt", 
"line_number": 407, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 421, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 448, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 449, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 449, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 449, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 484, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 485, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 497, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 498, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 525, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 550, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 550, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 551, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 551, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 564, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 564, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 566, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 566, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 567, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 568, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 569, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 570, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 570, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 571, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 571, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.random.lognormal", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 600, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 608, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 609, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 616, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 616, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 617, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 617, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 619, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 619, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 620, "usage_type": "call"}, {"api_name": "numpy.random.lognormal", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 626, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 627, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 628, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 629, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 669, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 670, "usage_type": "call"}, {"api_name": "scipy.sparse.find", "line_number": 690, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 690, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 694, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 695, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 697, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 698, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 707, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 707, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 708, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 708, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 709, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 709, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 710, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 710, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 711, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 711, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 712, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 712, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 718, "usage_type": "call"}, {"api_name": "numpy.hstack", 
"line_number": 733, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 747, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 777, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 782, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 794, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 800, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 817, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 817, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.quiver", "line_number": 820, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 820, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 821, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 821, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 822, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 822, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 823, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 823, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 824, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 824, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 825, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 825, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 829, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 830, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 830, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 832, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 832, "usage_type": "name"}, {"api_name": "time.time", "line_number": 837, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 856, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 870, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 877, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 877, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 880, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 880, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 881, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 881, "usage_type": "name"}]} +{"seq_id": "195563993", "text": "\"\"\"Defines trends calculations for stations\"\"\"\nimport logging\n\nimport faust\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Faust will ingest records from Kafka in this format\nclass Station(faust.Record):\n stop_id: int\n direction_id: str\n stop_name: str\n station_name: str\n station_descriptive_name: str\n station_id: int\n order: int\n red: bool\n blue: bool\n green: bool\n\n\n# Faust will produce records to Kafka in this format\nclass TransformedStation(faust.Record):\n station_id: int\n station_name: str\n order: int\n line: str\n\n\n\napp = faust.App(\"stations-stream\", broker=\"kafka://localhost:9092\", store=\"memory://\")\n\ntopic = app.topic(\"connect-postgres-stations\", value_type=Station)\nout_topic = app.topic(\"transformed-postgres-station-topic\", partitions=1, value_type = 
TransformedStation)\n\ntransformed_station_table = app.Table(\n \"transformed-station-table\",\n default=TransformedStation,\n partitions=1,\n changelog_topic=out_topic,\n)\n\n\n\n# transformed input `Station` records into `TransformedStation` records. \n# \"line\" is the color of the station. if the`Station` record field `red` set to true, set the `line` of the `TransformedStation` record to the string `\"red\"`\n#\n@app.agent(topic)\nasync def transform_stations(station_events):\n async for station_event in station_events:\n if station_event.red == True:\n line = \"red\"\n elif station_event.blue == True:\n line = \"blue\"\n elif station_event.green == True:\n line = \"green\"\n else:\n line = \"Nil\"\n \n #logger.info(f\"station_id is {station_event.station_id} and station_name is {station_event.station_name}\")\n \n transformed_station_table[station_event.station_id] = TransformedStation(station_id = station_event.station_id, station_name = station_event.station_name, order = station_event.order, line = line)\n \n \n\n\nif __name__ == \"__main__\":\n app.main()\n", "sub_path": "consumers/faust_stream.py", "file_name": "faust_stream.py", "file_ext": "py", "file_size_in_byte": 1966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "faust.Record", "line_number": 11, "usage_type": "attribute"}, {"api_name": "faust.Record", "line_number": 25, "usage_type": "attribute"}, {"api_name": "faust.App", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "220633464", "text": "# i used libraries that need to be installed first\nimport pandas as pd\n# read string sebagai file\nfrom io import StringIO\nimport string\nfrom Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory\nfrom string import punctuation\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n# from sklearn.feature_extraction import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\ndef baca_csv():\n dframe = pd.read_csv('taharah_intent.csv')\n return dframe\n\ndef convert_to_tidf():\n y = baca_csv()\n y['id_label'] = y['labels'].factorize()[0]\n id_label_df = y[['labels','id_label']].drop_duplicates().sort_values('id_label')\n label_ke_id = dict(id_label_df.values)\n id_ke_label = dict(id_label_df[['id_label', 'labels']].values)\n return y\n\ndef mnb():\n factory = StopWordRemoverFactory()\n stop_word_list = factory.get_stop_words()\n stop = stop_word_list + list(punctuation)\n tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2),\n stop_words=stop)\n df = convert_to_tidf()\n X_train, X_test, y_train, y_test = train_test_split(df['questions'], df['labels'], random_state=0)\n count_vect = CountVectorizer()\n X_train_counts = count_vect.fit_transform(X_train)\n tfidf_transformer = TfidfTransformer()\n X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\n feed = MultinomialNB().fit(X_train_tfidf, y_train)\n return feed, count_vect\n\n#X_test.iloc[0]\n\ndef predict(question):\n feed, count_vect = mnb()\n intent = feed.predict(count_vect.transform([question]))\n intent = str(intent).strip(\"['']\")\n return intent\n\nquestion=input(\"Masukan pertanyaan : 
\")\nx=predict(question)\nintent=str(x).strip(\"['']\")\nprint(\"Intent predicted : \"+format(x))\n\n", "sub_path": "multinomial_naive_bayes/classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 1993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "Sastrawi.StopWordRemover.StopWordRemoverFactory.StopWordRemoverFactory", "line_number": 28, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 30, "usage_type": "argument"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "60358191", "text": "from django.shortcuts import render, HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom publicMode.views import get_list\nimport sys\nimport io\nimport os\nimport glob\nimport time\nimport operator\nfrom django.http import FileResponse\nfrom testTools.models import JobMode\nimport json\nfrom apscheduler.schedulers.base import SchedulerAlreadyRunningError, SchedulerNotRunningError\nfrom testTools.job_aps import JobRun, get_url, run_plans\nfrom testTools.common.usan import PayMock # 内部项目mock处理,不能上传\nfrom testTools.common.lucky import LuckyPayMock # 内部项目mock处理,不能上传\nfrom testTools.common.cat import catPayMock, shopCatPayMock # 内部项目mock处理,不能上传\nimport testTools.encrypts as encrypts\nfrom django.http import JsonResponse\n\n# Create your views here.\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\n_menu = os.path.abspath('.')\n\n\n@login_required\ndef web_main(request):\n \"\"\"工具主页面\"\"\"\n return render(request, \"main.html\")\n\n\n# @login_required\ndef download_app(request, project=None):\n \"\"\"app下载列表页\"\"\"\n res = get_list(\"testTool\", \"download\")\n if project:\n menu = _menu + '/Books/' + project\n if not os.path.isdir(menu):\n os.makedirs(menu)\n file_list = get_file(project)\n return render(request, \"download.html\",\n {\"products\": res[0], \"projects\": res[1], \"project\": project, \"files\": file_list})\n\n\ndef jia_mi(request):\n return render(request, \"jiami.html\")\n\n\n@csrf_exempt\ndef encrypt(request):\n mes = ''\n try:\n request_dic = json.loads(request.body)\n encrypt_type = request_dic[\"encrypt_type\"]\n req_data = request_dic[\"req_data\"]\n if int(encrypt_type) == 1:\n _dic = dict(json.loads(req_data))\n mes = encrypts.dict_encrypt_md5_sign(_dic)\n except Exception as e:\n print(e.__str__())\n return HttpResponse(json.dumps({\"res\": mes}, ensure_ascii=False), content_type=\"application/json\")\n\n\ndef get_file(project):\n \"\"\"获取待下载文件列表\"\"\"\n if project is None:\n return None\n my_folder = _menu + '/Books/' + project\n pattern = '*.*'\n os.chdir(my_folder)\n maps_list = []\n for f_name in glob.glob(pattern):\n if os.path.isfile(f_name):\n maps = dict()\n maps['name'] = f_name\n time_tup = time.localtime(os.path.getctime(f_name))\n maps['crateTime'] = time.strftime('%Y-%m-%d %H:%M:%S', time_tup)\n maps['size'] = 
os.path.getsize(f_name) / 1024\n maps_list.append(maps)\n return sorted(maps_list, key=operator.itemgetter('crateTime'), reverse=True)\n\n\ndef download(request, project, filename):\n \"\"\"下载文件\"\"\"\n print(request)\n my_folder = _menu + '/Books/' + project + '/' + filename\n file = open(my_folder, 'rb')\n response = FileResponse(file)\n response['Content-Type'] = 'application/octet-stream'\n # response['Content-Disposition'] = 'attachment;filename=\"example.tar.gz\"'\n return response\n\n\n@login_required\ndef job(request):\n \"\"\"job站\"\"\"\n search_dict = dict()\n job_name = request.GET.get('jobName')\n if job_name:\n search_dict['jobName'] = job_name\n job_flag = request.GET.get('dataFlag')\n if job_flag:\n search_dict['dataFlag'] = job_flag\n jobs = JobMode.objects.filter(**search_dict)\n return render(request, \"job.html\", {\"jobs\": jobs})\n\n\n@csrf_exempt\ndef job_action(request, action):\n mes = ''\n _dic = dict(json.loads(request.body))\n if action == 'jobExec': # job操作\n mes = change_job(_dic)\n if action == 'jobNowExec': # 立即执行\n run_job(_dic)\n return HttpResponse(json.dumps({\"code\": mes}, ensure_ascii=False), content_type=\"application/json\")\n\n\ndef change_job(_dic):\n _name = 'register_job_' + str(_dic['id'])\n if _dic['type'] == 'load':\n try:\n sentence1 = _name + '.shut_down()'\n # print(sentence1)\n exec(sentence1)\n except NameError:\n pass\n except SchedulerNotRunningError:\n pass\n sentence = 'global ' + _name + ';' + _name + ' = JobRun(' + str(_dic['id']) + ');'\n # print(sentence)\n exec(sentence)\n mes = '加载成功'\n elif _dic['type'] == 'run':\n try:\n sentence = _name + '.start()'\n # print(sentence)\n exec(sentence)\n mes = '运行成功'\n except SchedulerAlreadyRunningError:\n sentence = _name + '.resume()'\n # print(sentence)\n exec(sentence)\n mes = '恢复运行'\n except NameError:\n # sentence = 'global ' + _name + ';' + _name + ' = JobRun(' + str(_dic['id']) + ');' + _name + '.start();'\n # print sentence\n # exec sentence\n mes = 'Job未加载'\n elif _dic['type'] == 'pause':\n try:\n sentence = _name + '.pausing()'\n # print(sentence)\n exec(sentence)\n mes = '已暂停运行'\n except SchedulerNotRunningError:\n mes = 'JOB未运行'\n except NameError:\n mes = 'Job未加载'\n else:\n return '操作不正确'\n return mes\n\n\ndef run_job(_dic):\n job_id = _dic['id']\n try:\n job_info = JobMode.objects.get(id=job_id)\n _type = job_info.jobType\n if _type == 1:\n get_url(job_info.jobUrl)\n if _type == 2:\n run_plans(job_info.jobUrl)\n except Exception as e:\n print(e.__str__())\n\n\ndef create_order(request, project=None):\n \"\"\"app下载列表页\"\"\"\n res = get_list(\"testTool\", \"order\")\n return render(request, \"order/createOrder.html\",\n {\"products\": res[0], \"projects\": res[1], \"project\": project})\n\n\n@csrf_exempt\ndef get_mac_list(request, project):\n _name = dict(json.loads(request.body))\n data = None\n if project == 'pay':\n data = PayMock.get_machine_list(_name['name'])\n if project == 'lucky':\n data = LuckyPayMock.get_machine_list(_name['name'])\n return JsonResponse(data, safe=False, json_dumps_params={'ensure_ascii': False})\n\n\n@csrf_exempt\ndef get_order_code(request, project):\n order_id = request.POST.get('serialId')\n code = request.POST.get('code')\n return render(request, \"order/qrcode.html\", {\"order_id\": order_id, \"code\": code, \"project\": project})\n\n\n@csrf_exempt\ndef ope_pay(request, project):\n mes = \"\"\n _dic = dict(json.loads(request.body))\n if project == 'pay':\n serial_id = _dic['SerialId']\n ope_type = _dic['type']\n token = _dic['token']\n coupon = 
_dic['coupon']\n mes = PayMock.pay_mock(serial_id, ope_type, token, coupon)\n if project == 'lucky':\n serial_id = _dic['SerialId']\n ope_type = _dic['type']\n token = _dic['token']\n coupon = _dic['coupon']\n mes = LuckyPayMock.pay_mock(serial_id, ope_type, token, coupon)\n if project == 'cat':\n serial_id = _dic['SerialId']\n ope_type = _dic['type']\n mes = catPayMock.pay_mock(serial_id, ope_type)\n if project == 'shopCat':\n serial_id = _dic['SerialId']\n ope_type = _dic['type']\n order_type = ''\n mes = shopCatPayMock.pay_mock(serial_id, ope_type, order_type)\n return HttpResponse(json.dumps({\"code\": \"success\", \"message\": mes}, ensure_ascii=False),\n content_type=\"application/json\")\n", "sub_path": "testTools/testToolsViews.py", "file_name": "testToolsViews.py", "file_ext": "py", "file_size_in_byte": 7506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.stdout", "line_number": 23, "usage_type": "attribute"}, {"api_name": "io.TextIOWrapper", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 27, "usage_type": "name"}, {"api_name": "publicMode.views.get_list", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "testTools.encrypts.dict_encrypt_md5_sign", "line_number": 59, "usage_type": "call"}, {"api_name": "testTools.encrypts", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 50, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 71, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.getctime", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 81, "usage_type": "call"}, {"api_name": "django.http.FileResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "testTools.models.JobMode.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "testTools.models.JobMode.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": 
"testTools.models.JobMode", "line_number": 105, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 106, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 95, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 117, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 117, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 109, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.base.SchedulerNotRunningError", "line_number": 129, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.base.SchedulerAlreadyRunningError", "line_number": 141, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.base.SchedulerNotRunningError", "line_number": 157, "usage_type": "name"}, {"api_name": "testTools.models.JobMode.objects.get", "line_number": 169, "usage_type": "call"}, {"api_name": "testTools.models.JobMode.objects", "line_number": 169, "usage_type": "attribute"}, {"api_name": "testTools.models.JobMode", "line_number": 169, "usage_type": "name"}, {"api_name": "testTools.job_aps.get_url", "line_number": 172, "usage_type": "call"}, {"api_name": "testTools.job_aps.run_plans", "line_number": 174, "usage_type": "call"}, {"api_name": "publicMode.views.get_list", "line_number": 181, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 182, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 188, "usage_type": "call"}, {"api_name": "testTools.common.usan.PayMock.get_machine_list", "line_number": 191, "usage_type": "call"}, {"api_name": "testTools.common.usan.PayMock", "line_number": 191, "usage_type": "name"}, {"api_name": "testTools.common.lucky.LuckyPayMock.get_machine_list", "line_number": 193, "usage_type": "call"}, {"api_name": "testTools.common.lucky.LuckyPayMock", "line_number": 193, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 194, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 186, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 201, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 197, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 207, "usage_type": "call"}, {"api_name": "testTools.common.usan.PayMock.pay_mock", "line_number": 213, "usage_type": "call"}, {"api_name": "testTools.common.usan.PayMock", "line_number": 213, "usage_type": "name"}, {"api_name": "testTools.common.lucky.LuckyPayMock.pay_mock", "line_number": 219, "usage_type": "call"}, {"api_name": "testTools.common.lucky.LuckyPayMock", "line_number": 219, "usage_type": "name"}, {"api_name": "testTools.common.cat.catPayMock.pay_mock", "line_number": 223, "usage_type": "call"}, {"api_name": "testTools.common.cat.catPayMock", "line_number": 223, "usage_type": "name"}, {"api_name": "testTools.common.cat.shopCatPayMock.pay_mock", "line_number": 228, "usage_type": "call"}, {"api_name": "testTools.common.cat.shopCatPayMock", "line_number": 228, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 229, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 229, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 204, "usage_type": "name"}]} +{"seq_id": "453156768", "text": "\nfrom main.models import Car, 
Propusk\nfrom django.shortcuts import render\nfrom django.db.models import Q\n\n\ndef search_view(request):\n query = None\n results = []\n propusks = []\n if request.method == 'GET':\n print(\"Get method\")\n print(request.GET)\n print(request.GET['search_value'])\n query = request.GET.get('search_value')\n\n results = Car.objects.filter(Q(grz__icontains=query))\n print(results[0].is_our)\n print(results[0].grz)\n print(results[0].user_id)\n print(results[0].create_date)\n prop = None\n for result in results:\n try:\n prop = Propusk.objects.filter(car=result)\n except:\n print('no propusk')\n if prop:\n propusks.append(prop)\n print(propusks)\n if len(propusks) > 0:\n print(propusks[0])\n print(propusks[0][0])\n\n return render(request,'search/search.html', {'query': query, 'results': results, 'propusks': propusks})\n\n\ndef car_info(request, car_id):\n car = Car.objects.get(id=car_id)\n propusk = None\n try:\n propusk = Propusk.objects.get(car=car)\n except:\n print('нет пропусков')\n return render(request,'main/car_info.html', {'car': car, 'propusk':propusk})", "sub_path": "search/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "main.models.Car.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "main.models.Car.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "main.models.Car", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 17, "usage_type": "call"}, {"api_name": "main.models.Propusk.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "main.models.Propusk.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "main.models.Propusk", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "main.models.Car.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "main.models.Car.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "main.models.Car", "line_number": 39, "usage_type": "name"}, {"api_name": "main.models.Propusk.objects.get", "line_number": 42, "usage_type": "call"}, {"api_name": "main.models.Propusk.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "main.models.Propusk", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "606643621", "text": "from collections import OrderedDict\nfrom collections import namedtuple\n\n\nclass ChipInfo(namedtuple('ChipInfo', 'registers memory')):\n\n def replace(self, **kwargs):\n return self._replace(**kwargs)\n\n\nRegisterInfo = namedtuple('RegisterInfo', 'name type')\n\n\nREG_TYPE_NORMAL = 'normal'\nREG_TYPE_SIMPLE = 'simple'\nREG_TYPE_XBUS = 'xbus'\n\n\nCHIP_TYPE_MC4000 = 'MC4000'\nCHIP_TYPE_MC4000X = 'MC4000X'\nCHIP_TYPE_MC6000 = 'MC6000'\n\n\nCHIPS = {\n CHIP_TYPE_MC4000: ChipInfo(\n registers=[\n RegisterInfo('null', REG_TYPE_NORMAL),\n RegisterInfo('acc', REG_TYPE_NORMAL),\n RegisterInfo('p0', REG_TYPE_SIMPLE),\n RegisterInfo('p1', REG_TYPE_SIMPLE),\n RegisterInfo('x0', REG_TYPE_XBUS),\n RegisterInfo('x1', REG_TYPE_XBUS),\n ],\n memory=9\n ),\n CHIP_TYPE_MC4000X: ChipInfo(\n registers=[\n RegisterInfo('null', REG_TYPE_NORMAL),\n RegisterInfo('acc', REG_TYPE_NORMAL),\n RegisterInfo('x0', REG_TYPE_XBUS),\n RegisterInfo('x1', REG_TYPE_XBUS),\n 
RegisterInfo('x2', REG_TYPE_XBUS),\n RegisterInfo('x3', REG_TYPE_XBUS),\n ],\n memory=9\n ),\n CHIP_TYPE_MC6000: ChipInfo(\n registers=[\n RegisterInfo('null', REG_TYPE_NORMAL),\n RegisterInfo('acc', REG_TYPE_NORMAL),\n RegisterInfo('dat', REG_TYPE_NORMAL),\n RegisterInfo('p0', REG_TYPE_SIMPLE),\n RegisterInfo('p1', REG_TYPE_SIMPLE),\n RegisterInfo('x0', REG_TYPE_XBUS),\n RegisterInfo('x1', REG_TYPE_XBUS),\n RegisterInfo('x2', REG_TYPE_XBUS),\n RegisterInfo('x3', REG_TYPE_XBUS),\n ],\n memory=14\n ),\n}\n\n\n# this is done just to make sure this logic doesn't pollute global variable namespace blah blah\ndef make_chip_tables_use_keys():\n global CHIPS\n\n for chip_name in CHIPS.keys():\n chip = CHIPS[chip_name]\n CHIPS[chip_name] = chip.replace(\n registers=OrderedDict([\n (info.name, info)\n for info in chip.registers\n ]),\n )\nmake_chip_tables_use_keys()\n\n\ndef lookup_by_name(name):\n return CHIPS.get(name, None)\n\n\ndef list_names():\n return CHIPS.keys()\n", "sub_path": "shenasm/chips.py", "file_name": "chips.py", "file_ext": "py", "file_size_in_byte": 2255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "457094993", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom ipwhois import IPWhois\nimport pywhois\nimport pythonwhois\nimport whois\nfrom pprint import pprint\nimport datetime\n\ndef findLink(reference, url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, 'lxml')\n\tresults = soup.findAll(reference)\n\treturn results\n\t\ndef checkItems(reference, url, getter):\n\tcount = 0\n\toccur = 0\n\titems = findLink(reference, url)\n\tif items:\n\t\tfor item in items:\n\t\t\tif not isItemFromSameHost(item, url, getter):\n\t\t\t\toccur = occur + 1\n\t\t\tcount = count + 1\n\t\treturn occur/float(count)\n\treturn 0\n\t\ndef isItemFromSameHost(item, url, getter):\n\tnewItem = str(item.get(getter))\n\tif newItem.count('.')<=1:\n\t\treturn True\n\telif url not in newItem:\n\t\treturn False\n\treturn True\n\ndef isOrgOnUrl(url):\n\tprefix = 'http://www'\n\tprefix2 = 'https://www'\n\tprefix3 = 'www'\n\tif (prefix in url) or (prefix2 in url) or (prefix3 in url):\n\t\turlArr = url.split('.')[1:]\n\t\turl = '.'.join(urlArr)\n\tdetails = pythonwhois.get_whois(url)\n\torg = str(details['contacts']['admin']['organization'].split(' ')[0]).lower()\n\tif org in url:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef isValidFor6Months(url):\n\tprefix = 'http://www'\n\tprefix2 = 'https://www'\n\tprefix3 = 'www'\n\tif (prefix in url) or (prefix2 in url) or (prefix3 in url):\n\t\turlArr = url.split('.')[1:]\n\t\turl = '.'.join(urlArr)\n\tdetails = pythonwhois.get_whois(url)\n\ttoday = datetime.datetime.now()\n\tdelta = details['expiration_date'][0] - today \n\tdeltaDays = float(delta.days) - 180\n\tif deltaDays>0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\n\n\n\"\"\"\ndomains = ['google.com', 'stackoverflow.com']\nfor dom in domains:\n\tdomain = whois.query(dom)\n\tpprint(domain)\n\n\"\"\"\n\n\"\"\"\nobj = IPWhois('www.google.com')\n\nres=obj.lookup()\n\npprint(res)\n\nw = pywhois.whois('google.com')\nprint w\n\"\"\"\n\n\"\"\"\nurl = \"http://www.python.org\"\nreference = \"a\"\ngetter = \"href\"\n\nlinks = findLink(reference, url)\nfor link in links:\n 
print(link.get('href'))\n\nn = checkItems(reference, url, getter)\nprint n\n\"\"\"\n\n\n\"\"\"\n\nr = requests.get(\"http://www.python.org\")\ndata = r.text\nsoup = BeautifulSoup(data)\nimgs = soup.findAll(\"img\")\n\nfor img in imgs:\n print(img.get('src'))\n\n\"\"\"\n\"\"\"\nfor link in soup.find_all('a'):\n print(link.get('href'))\n\"\"\"\n", "sub_path": "url_check/bsoup_check.py", "file_name": "bsoup_check.py", "file_ext": "py", "file_size_in_byte": 2214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "pythonwhois.get_whois", "line_number": 44, "usage_type": "call"}, {"api_name": "pythonwhois.get_whois", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "143074323", "text": "#!/usr/bin/env python\n#\n# License: BSD\n# https://raw.github.com/robotics-in-concert/rocon_multimaster/license/LICENSE\n\nimport concert_msgs.srv as concert_srvs\nimport concert_msgs.msg as concert_msgs\nimport std_msgs.msg as std_msgs\nimport geometry_msgs.msg as geometry_msgs\nimport rospy\nimport yaml\nimport genpy\nfrom rospy_message_converter import message_converter\n\nfrom .msg import HeadYaw as FuroHeadYaw\n\n\nimport serial\nimport math\nimport threading\n\nclass DummyFuro(object):\n\n SERVICE_ENABLE_SRV_NAME = '/concert/service/enable'\n SERVICE_LIST_TOPIC_NAME = '/concert/service/list'\n FURO_SRV_NAME = '/concert/service/furo'\n\n DOT_GRAPH_CONDUCTOR = '/concert/conductor/concert_clients_dotgraph'\n DOT_GRAPH_GATEWAY = '/concert/gateway/dotgraph'\n\n running = False\n\n def __init__(self):\n self.load_example_services()\n self.load_dotgraph()\n self.init_rosapis()\n self.init_serial()\n\n def init_serial(self):\n self._serial = serial.Serial('/dev/ttyS0', 115200)\n self._serial.close()\n self._serial.open()\n self.loginfo('Serial Opened.')\n\n def PlaySpeech(self, param):\n self.loginfo('PlaySpeech('+param.data+')')\n\n def StopSpeech(self, param):\n self.loginfo('StopSpeech()')\n\n def InitPose(self, param):\n self.loginfo('InitPose()')\n\n def SetHeadYaw(self, param):\n self.loginfo('SetHeadYaw('+str(param.pitch)+','+str(param.speed)+')')\n\n def DriveWheel(self, param):\n self.loginfo('DriveWheel('+str(param.linear.x)+','+str(param.angular.x)+')')\n linear = param.linear.x\n angular = (param.angular.x) * math.pi / 180\n axleDistance = 0.3\n angular = angular * axleDistance / 2\n slope = 1.5\n gearRatio = 64\n wheelDiameter = 0.25\n leftVel = int(math.floor(slope * 60 * gearRatio * (linear - angular) / (math.pi * wheelDiameter)))\n rightVel = int(math.floor(slope * 60 * gearRatio * (linear + angular) / (math.pi * wheelDiameter)))\n self.DriveDifferential(-leftVel, 1000, rightVel, 1000)\n\n def DriveDifferential(self, left, leftTime, right, rightTime):\n arr = [0xFF,0xFF,0xFE,0x0E,0x06,0x20,0x04,0x2E,(left & 0xFF),(left >> 8 & 0xFF),(leftTime & 0xFF),(leftTime >> 8 & 0xFF),0x2F,(right & 0xFF),(right >> 8 & 0xFF),(rightTime & 0xFF),(rightTime >> 8 & 0xFF)]\n self.loginfo('arr : ' + self.array2string(arr))\n arr.append(self.CalcCheckSum(arr, len(arr)))\n self._serial.write(self.array2string(arr))\n\n def CalcCheckSum(self, arr, length):\n checkSum = 2\n for b in arr:\n checkSum += b\n return (~checkSum & 0xFF)\n\n def 
array2string(self, arr):\n return ''.join(chr(b) for b in arr)\n\n def StopWheel(self, param):\n self.loginfo('StopWheel()')\n\n def load_example_services(self):\n filename = rospy.get_param('~example_services')\n\n with open(filename) as f:\n profile = yaml.load(f)\n\n msg = message_converter.convert_dictionary_to_ros_message('concert_msgs/Services', profile)\n self.profile = profile\n self.services = msg\n\n def load_dotgraph(self):\n self.graph = {}\n\n self.graph['conductor_graph'] = self.load_file('conductor_graph')\n self.graph['gateway_graph'] = self.load_file('gateway_graph')\n\n def load_file(self, name):\n\n param_name = '~' + str(name)\n filename = rospy.get_param(param_name)\n with open(filename) as f:\n data = f.read()\n return data\n\n def init_rosapis(self):\n self.srv = {}\n self.srv['service_enable'] = rospy.Service(self.SERVICE_ENABLE_SRV_NAME, concert_srvs.EnableService, self.process_enable_service)\n\n self.pub = {}\n self.pub['service_list'] = rospy.Publisher(self.SERVICE_LIST_TOPIC_NAME, concert_msgs.Services, queue_size = 3, latch=True)\n self.pub['conductor_graph'] = rospy.Publisher(self.DOT_GRAPH_CONDUCTOR, std_msgs.String, queue_size = 3, latch=True)\n self.pub['gateway_graph'] = rospy.Publisher(self.DOT_GRAPH_GATEWAY, std_msgs.String, queue_size = 3, latch=True)\n self.pub['on_user_approached'] = rospy.Publisher(self.FURO_SRV_NAME+'/on_user_approached', std_msgs.Bool, queue_size = 3, latch=True)\n\n self.subs = {}\n self.subs['play_speech'] = rospy.Subscriber(self.FURO_SRV_NAME+'/play_speech', std_msgs.String, self.PlaySpeech)\n self.subs['stop_speech'] = rospy.Subscriber(self.FURO_SRV_NAME+'/stop_speech', std_msgs.Empty, self.StopSpeech)\n self.subs['set_head_yaw'] = rospy.Subscriber(self.FURO_SRV_NAME+'/set_head_yaw', FuroHeadYaw, self.SetHeadYaw)\n self.subs['init_pose'] = rospy.Subscriber(self.FURO_SRV_NAME+'/init_pose', std_msgs.Empty, self.InitPose)\n self.subs['drive_wheel'] = rospy.Subscriber(self.FURO_SRV_NAME+'/cmd_vel', geometry_msgs.Twist, self.DriveWheel)\n self.subs['stop_wheel'] = rospy.Subscriber(self.FURO_SRV_NAME+'/stop_wheel', std_msgs.Empty, self.StopWheel)\n\n def process_enable_service(self, req):\n en = 'enabled' if req.enable else 'disabled'\n self.loginfo(str(req.name) + ' : ' + en)\n\n service_found = False\n for service in self.profile['services']:\n if req.name == service['name']:\n service['enabled'] = req.enable \n service_found = True\n \n if service_found:\n msg = message_converter.convert_dictionary_to_ros_message('concert_msgs/Services', self.profile)\n self.services = msg\n self.pub['service_list'].publish(self.services)\n #self.running = req.enable\n #if self.running:\n # threading.Thread(target=self.moving).start()\n return concert_srvs.EnableServiceResponse(True, '')\n else:\n return concert_srvs.EnableServiceResponse(False,'Service not found')\n def spin(self):\n self.pub['service_list'].publish(self.services)\n self.pub['conductor_graph'].publish(self.graph['conductor_graph'])\n self.pub['gateway_graph'].publish(self.graph['gateway_graph'])\n rospy.spin()\n self._serial.close()\n self.loginfo('Serial Closed.')\n\n def loginfo(self, msg):\n rospy.loginfo('Dummy Furo : ' + str(msg))\n", "sub_path": "dummy_furo/src/dummy_furo/dummy_furo.py", "file_name": "dummy_furo.py", "file_ext": "py", "file_size_in_byte": 6266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "serial.Serial", "line_number": 40, "usage_type": "call"}, {"api_name": "math.pi", 
"line_number": 60, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 66, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 66, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 67, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 67, "usage_type": "attribute"}, {"api_name": "rospy.get_param", "line_number": 89, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 92, "usage_type": "call"}, {"api_name": "rospy_message_converter.message_converter.convert_dictionary_to_ros_message", "line_number": 94, "usage_type": "call"}, {"api_name": "rospy_message_converter.message_converter", "line_number": 94, "usage_type": "name"}, {"api_name": "rospy.get_param", "line_number": 107, "usage_type": "call"}, {"api_name": "rospy.Service", "line_number": 114, "usage_type": "call"}, {"api_name": "concert_msgs.srv.EnableService", "line_number": 114, "usage_type": "attribute"}, {"api_name": "concert_msgs.srv", "line_number": 114, "usage_type": "name"}, {"api_name": "rospy.Publisher", "line_number": 117, "usage_type": "call"}, {"api_name": "concert_msgs.msg.Services", "line_number": 117, "usage_type": "attribute"}, {"api_name": "concert_msgs.msg", "line_number": 117, "usage_type": "name"}, {"api_name": "rospy.Publisher", "line_number": 118, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 118, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 118, "usage_type": "name"}, {"api_name": "rospy.Publisher", "line_number": 119, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 119, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 119, "usage_type": "name"}, {"api_name": "rospy.Publisher", "line_number": 120, "usage_type": "call"}, {"api_name": "std_msgs.msg.Bool", "line_number": 120, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 120, "usage_type": "name"}, {"api_name": "rospy.Subscriber", "line_number": 123, "usage_type": "call"}, {"api_name": "std_msgs.msg.String", "line_number": 123, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 123, "usage_type": "name"}, {"api_name": "rospy.Subscriber", "line_number": 124, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 124, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 124, "usage_type": "name"}, {"api_name": "rospy.Subscriber", "line_number": 125, "usage_type": "call"}, {"api_name": "msg.HeadYaw", "line_number": 125, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 126, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 126, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 126, "usage_type": "name"}, {"api_name": "rospy.Subscriber", "line_number": 127, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 127, "usage_type": "attribute"}, {"api_name": "geometry_msgs.msg", "line_number": 127, "usage_type": "name"}, {"api_name": "rospy.Subscriber", "line_number": 128, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 128, "usage_type": "attribute"}, {"api_name": "std_msgs.msg", "line_number": 128, "usage_type": "name"}, {"api_name": "rospy_message_converter.message_converter.convert_dictionary_to_ros_message", "line_number": 141, "usage_type": "call"}, {"api_name": "rospy_message_converter.message_converter", "line_number": 141, "usage_type": "name"}, {"api_name": 
"concert_msgs.srv.EnableServiceResponse", "line_number": 147, "usage_type": "call"}, {"api_name": "concert_msgs.srv", "line_number": 147, "usage_type": "name"}, {"api_name": "concert_msgs.srv.EnableServiceResponse", "line_number": 149, "usage_type": "call"}, {"api_name": "concert_msgs.srv", "line_number": 149, "usage_type": "name"}, {"api_name": "rospy.spin", "line_number": 154, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "23048548", "text": "import pytest\n\nimport pyslackersweb\nimport pyslackersweb.website.tasks\n\npytest_plugins = (\"slack.tests.plugin\",)\n\n\n@pytest.fixture\nasync def client(aiohttp_client, slack_client):\n\n application = await pyslackersweb.app_factory()\n\n app_client = await aiohttp_client(application)\n app_client.app[\"scheduler\"].shutdown()\n app_client.app[\"website_app\"][\"slack_client\"] = slack_client\n app_client.app[\"website_app\"][\"slack_client_legacy\"] = slack_client\n\n return app_client\n", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyslackersweb.app_factory", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "220395525", "text": "\"\"\"\nAn example html page made with classes,\nallows for a simple webserver to run to show html/css\n\"\"\"\nfrom .attributes import Attr_InlineStyle, Style_declaration\nfrom .elements import Page, Tag_With_Inner\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\ndef get_page():\n \"\"\"Returns an example html page convert to string to get html output\"\"\"\n page = Page(\"Test Page\", lang=\"en\")\n page.body.add_inner(Tag_With_Inner(\"h1\", \"Test Page\"))\n page.body.add_inner(Tag_With_Inner(\"p\", \"Welcome to the test page\"))\n page.body.add_inner(Tag_With_Inner(\"a\", \"Enchantedcode\", href=\"https://enchantedcode.co.uk\"))\n return page\n\nclass Test_Web_Server(BaseHTTPRequestHandler):\n \"\"\"The test server BaseHTTPRequestHandler\"\"\"\n def do_GET(self):\n if self.path != \"\":\n self.send_response(404)\n else:\n self.send_response(200)\n self.end_headers()\n self.wfile.write(bytes(html_file, \"utf-8\"))\n\ndef run_webserver(port=5050):\n \"\"\"Runs a simple local host server to serve the example page, will serve forever\"\"\"\n httpd = HTTPServer((\"localhost\", port), Test_Web_Server)\n try:\n print(\"Use Ctrl+C to stop server\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n\nif __name__ == \"__main__\":\n html_file = str(get_page())\n print(html_file)\n run_webserver()\n", "sub_path": "website_to_class/examples.py", "file_name": "examples.py", "file_ext": "py", "file_size_in_byte": 1373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "elements.Page", "line_number": 11, "usage_type": "call"}, {"api_name": "elements.Tag_With_Inner", "line_number": 12, "usage_type": "call"}, {"api_name": "elements.Tag_With_Inner", "line_number": 13, "usage_type": "call"}, {"api_name": "elements.Tag_With_Inner", "line_number": 14, "usage_type": "call"}, {"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 17, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "117401971", "text": "from selenium import 
webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom fixture.session import SessionHelper\nfrom fixture.request import RequestHelper\nfrom fixture.waitFor import WaitForHelper\n\n\nclass Application:\n\n def __init__(self, config):\n caps = DesiredCapabilities.FIREFOX\n caps[\"marionette\"] = True\n caps[\"binary\"] = config['test_config']['browser_path']\n profile = webdriver.FirefoxProfile(config['test_config']['profile'])\n self.driver = webdriver.Firefox(capabilities=caps, firefox_profile=profile)\n #self.driver.implicitly_wait(10)\n self.base_url = config['web']['baseUrl']\n self.config = config\n self.verificationErrors = []\n self.accept_next_alert = True\n self.open_home_page()\n self.session = SessionHelper(self)\n self.request = RequestHelper(self)\n self.waitFor = WaitForHelper(self)\n\n def is_valid(self):\n try:\n self.driver.current_url\n return True\n except:\n return False\n\n def open_home_page(self):\n driver = self.driver\n driver.get(self.base_url + \"itwCredo/home.seam\")\n\n def fail(self, message):\n print(message)\n assert False\n\n def destroy(self):\n self.driver.quit()", "sub_path": "fixture/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 1324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.FIREFOX", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities", "line_number": 11, "usage_type": "name"}, {"api_name": "selenium.webdriver.FirefoxProfile", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "fixture.session.SessionHelper", "line_number": 22, "usage_type": "call"}, {"api_name": "fixture.request.RequestHelper", "line_number": 23, "usage_type": "call"}, {"api_name": "fixture.waitFor.WaitForHelper", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "40607870", "text": "'''Integrations with github'''\nimport requests\nfrom ghost_ci.exceptions import TokenExchangeFailure\n\n\ndef fetch_bibliography(token):\n '''Fetch list of user's repositories from github'''\n response = requests.get(\n 'https://api.github.com/user/repos',\n params={\n 'access_token': token,\n 'sort': 'created',\n 'direction': 'desc'\n }\n )\n assert response.status_code == 200\n return response.json()\n\n\nclass GithubOAuth2Provider(object):\n '''Github OAuth2 provider'''\n def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n\n @property\n def authorization_url(self):\n '''Where users should go to get a session code'''\n return \"https://github.com/login/oauth/authorize?\" + \\\n \"scope=user:email&\" + \\\n f\"client_id={self.client_id}\"\n\n def get_access_token(self, session_code):\n '''Exchange with OAuth2 Provider'''\n try:\n response = requests.post(\n 'https://github.com/login/oauth/access_token',\n params={\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'code': session_code\n },\n headers={'Accept': 'application/json'}\n )\n return response.json()['access_token']\n except KeyError as err:\n raise TokenExchangeFailure(err)\n\n def get_user_info(self, access_token): # 
pylint: disable=no-self-use\n '''Get user info from identity provider'''\n try:\n response = requests.get(\n \"https://api.github.com/user\",\n params={'access_token': access_token},\n headers={'Accept': 'application/json'}\n )\n assert response.status_code == 200\n return response.json()\n except Exception as err: # pylint: disable=bare-except\n raise TokenExchangeFailure(err)\n", "sub_path": "ghost_ci/perimeter/github.py", "file_name": "github.py", "file_ext": "py", "file_size_in_byte": 2035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "ghost_ci.exceptions.TokenExchangeFailure", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "ghost_ci.exceptions.TokenExchangeFailure", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "360060484", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\n\nfrom pyramid.authentication import CallbackAuthenticationPolicy\nfrom pyramid.exceptions import HTTPNotFound\nfrom pyramid.security import Allow, ALL_PERMISSIONS\nfrom ziggurat_foundations.models.services.resource import ResourceService\nfrom ziggurat_foundations.permissions import permission_to_pyramid_acls\n\nfrom testscaffold.services.auth_token import AuthTokenService\nfrom testscaffold.services.user import UserService\nfrom testscaffold.util import safe_integer\n\nlog = logging.getLogger(__name__)\n\n\ndef groupfinder(userid, request):\n if userid and hasattr(request, 'user') and request.user:\n groups = ['group:%s' % g.id for g in request.user.groups]\n return groups\n return []\n\n\nclass AuthTokenAuthenticationPolicy(CallbackAuthenticationPolicy):\n def __init__(self, callback=None):\n self.callback = callback\n\n def remember(self, request, principal, **kw):\n return []\n\n def forget(self, request):\n return []\n\n def unauthenticated_userid(self, request):\n token = u'{}'.format(\n request.headers.get('x-testscaffold-auth-token', '')\n )\n if token:\n auth_token = AuthTokenService.by_token(\n token, db_session=request.dbsession)\n if auth_token:\n log.info(\n 'AuthTokenAuthenticationPolicy.unauthenticated_userid',\n extra={'found': True, 'owner': auth_token.owner_id})\n return auth_token.owner_id\n log.info('AuthTokenAuthenticationPolicy.unauthenticated_userid',\n extra={'found': False, 'owner': None})\n\n def authenticated_userid(self, request):\n return self.unauthenticated_userid(request)\n\n\ndef rewrite_root_perm(outcome, perm_user, perm_name):\n \"\"\"\n Translates root_administration into ALL_PERMISSIONS object\n \"\"\"\n if perm_name == 'root_administration':\n return outcome, perm_user, ALL_PERMISSIONS\n else:\n return outcome, perm_user, perm_name\n\n\ndef allow_root_access(request, context):\n \"\"\"\n Adds ALL_PERMISSIONS to every resource if user has 'root_permission'\n \"\"\"\n if getattr(request, 'user'):\n for perm in permission_to_pyramid_acls(request.user.permissions):\n if perm[2] == 'root_administration':\n context.__acl__.append(\n (perm[0], perm[1], ALL_PERMISSIONS))\n\n\ndef object_security_factory(request):\n object_type = request.matchdict['object']\n # fetch deta\n if object_type in ['resources', 'entries']:\n return DefaultResourceFactory(request)\n\n return RootFactory(request)\n\n\ndef 
filter_admin_panel_perms(item):\n if str(item[2]).startswith('admin_'):\n return False\n return True\n\n\nclass RootFactory(object):\n \"\"\"\n General factory for non-resource specific pages, returns an empty\n context object that will list permissions ONLY for the user specific\n to this request from ziggurat\n \"\"\"\n\n def __init__(self, request):\n self.__acl__ = []\n # general page factory - append custom non resource permissions\n if getattr(request, 'user'):\n permissions = UserService.permissions(request.user,\n db_session=request.dbsession)\n has_admin_panel_access = False\n panel_perms = ['admin_panel', ALL_PERMISSIONS]\n for outcome, perm_user, perm_name in permission_to_pyramid_acls(\n permissions):\n perm_tuple = rewrite_root_perm(outcome, perm_user, perm_name)\n if perm_tuple[0] is Allow and perm_tuple[2] in panel_perms:\n has_admin_panel_access = True\n self.__acl__.append(perm_tuple)\n\n # users have special permission called `admin_panel`\n # it should be prerequisite for other `admin*` permissions\n # if it is not present let's deny other admin permissions\n if not has_admin_panel_access:\n self.__acl__ = list(\n filter(filter_admin_panel_perms, self.__acl__))\n\n\nclass DefaultResourceFactory(object):\n def __init__(self, request):\n self.__acl__ = []\n resource_id = safe_integer(request.matchdict.get(\"object_id\"))\n self.resource = ResourceService.by_resource_id(\n resource_id, db_session=request.dbsession)\n if not self.resource:\n raise HTTPNotFound()\n\n if self.resource:\n self.__acl__ = self.resource.__acl__\n\n if self.resource and request.user:\n # add perms that this user has for this resource\n # this is a big performance optimization - we fetch only data\n # needed to check one specific user\n permissions = ResourceService.perms_for_user(\n self.resource, request.user)\n for outcome, perm_user, perm_name in permission_to_pyramid_acls(\n permissions):\n self.__acl__.append(\n rewrite_root_perm(outcome, perm_user, perm_name))\n\n allow_root_access(request, context=self)\n", "sub_path": "testscaffold/security.py", "file_name": "security.py", "file_ext": "py", "file_size_in_byte": 5201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "pyramid.authentication.CallbackAuthenticationPolicy", "line_number": 26, "usage_type": "name"}, {"api_name": "testscaffold.services.auth_token.AuthTokenService.by_token", "line_number": 41, "usage_type": "call"}, {"api_name": "testscaffold.services.auth_token.AuthTokenService", "line_number": 41, "usage_type": "name"}, {"api_name": "pyramid.security.ALL_PERMISSIONS", "line_number": 60, "usage_type": "name"}, {"api_name": "ziggurat_foundations.permissions.permission_to_pyramid_acls", "line_number": 70, "usage_type": "call"}, {"api_name": "pyramid.security.ALL_PERMISSIONS", "line_number": 73, "usage_type": "name"}, {"api_name": "testscaffold.services.user.UserService.permissions", "line_number": 102, "usage_type": "call"}, {"api_name": "testscaffold.services.user.UserService", "line_number": 102, "usage_type": "name"}, {"api_name": "pyramid.security.ALL_PERMISSIONS", "line_number": 105, "usage_type": "name"}, {"api_name": "ziggurat_foundations.permissions.permission_to_pyramid_acls", "line_number": 106, "usage_type": "call"}, {"api_name": "pyramid.security.Allow", "line_number": 109, "usage_type": "name"}, {"api_name": "testscaffold.util.safe_integer", "line_number": 124, 
"usage_type": "call"}, {"api_name": "ziggurat_foundations.models.services.resource.ResourceService.by_resource_id", "line_number": 125, "usage_type": "call"}, {"api_name": "ziggurat_foundations.models.services.resource.ResourceService", "line_number": 125, "usage_type": "name"}, {"api_name": "pyramid.exceptions.HTTPNotFound", "line_number": 128, "usage_type": "call"}, {"api_name": "ziggurat_foundations.models.services.resource.ResourceService.perms_for_user", "line_number": 137, "usage_type": "call"}, {"api_name": "ziggurat_foundations.models.services.resource.ResourceService", "line_number": 137, "usage_type": "name"}, {"api_name": "ziggurat_foundations.permissions.permission_to_pyramid_acls", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "262604921", "text": "\"\"\"\nOpen Database Connectivity concepts\n\"\"\"\n\nimport re\nimport sys\nimport cgi\nimport lib_util\n\n# ('C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 8\\\\Crystal Reports\\\\Samples\\\\en\\\\Databases\\\\xtreme', None, 'MSysAccessObjects', 'SYSTEM TABLE', None)\n# connectString = 'Driver={Microdsoft ODBC for Oracle};Server=:/.;uid= ;pwd='\n# cnxn = pyodbc.connect(connectString)\n\n# \"ODBC;DSN=TMA;UID=tmar;PWD=myPASSWORD;DBQ=tma;DBA= W;APA=T;PFC=1;TLO=0;DATABASE=\"\n\n# ODBC_ConnectString = \"DSN=%s\" % dsnNam\n# Ca fonctionne:\n# dsnNam=\"MyOracleDataSource\"\n\n# This works when giving the DATABASE, or not.\n# ODBC_ConnectString = 'DSN=%s;UID=system;PWD=troulala;DATABASE=\"XE\"' % dsnNam\n# ODBC_ConnectString = 'DSN=%s;UID=system;PWD=troulala' % dsnNam\n\n#\t\"odbc/dsn\" : ( \"tab\", \"#CCFF11\", \"#CCFF11\", 0, False ),\n#\t\"odbc/table\" : ( \"tab\", \"#11FF11\", \"#CCFF11\", 0, False ),\n#\t\"odbc/column\" : ( \"tab\", \"#11FF11\", \"#44FF11\", 0, False ),\n#\t\"odbc/procedure\" : ( \"tab\", \"#11FF11\", \"#CC4411\", 0, False ),\n\n\ndef Graphic_shape():\n\treturn \"tab\"\n\ndef Graphic_colorfill():\n\treturn \"#CCFF11\"\n\ndef Graphic_colorbg():\n\treturn \"#CCFF11\"\n\ndef Graphic_border():\n\treturn 0\n\ndef Graphic_is_rounded():\n\treturn True\n\n\n\n\n# Within a query component, the characters \";\", \"/\", \"?\", \":\", \"@\", \"&\", \"=\", \"+\", \",\", and \"$\" are reserved.\n# reserved = gen-delims / sub-delims\n# gen-delims = \":\" / \"/\" / \"?\" / \"#\" / \"[\" / \"]\" / \"@\"\n# sub-delims = \"!\" / \"$\" / \"&\" / \"'\" / \"(\" / \")\"\n# \t\t / \"*\" / \"+\" / \",\" / \";\" / \"=\"\n\n\n# \"UID=xxx;PWD=yyy;DRIVER=zzz\"\n# entity.py?xid=odbc/table.Dsn=@@@@@@@@,Table=MY_TABLE\n# entity.py?xid=odbc/table.Dsn:UID-xxx;PWD-yyy;DRIVER-zzz,Table:MY_TABLE\n# entity.py?xid=odbc/table.Dsn:UID(xxx)-PWD(yyy)-DRIVER(zzz),Table:MY_TABLE\n# entity.py?xid=odbc/table.Dsn:UID-xxx-PWD-yyy-DRIVER-zzz,Table:MY_TABLE\n# On encode ou crypte (base64) les valeurs qui ne sont pas en alphanum.\n# \"True\", \"False\", \"true\", \"yes\", \"0\", \"1\"\nrgxTrueFalse = \"[a-zA-Z01]*\"\n\nrgxUser = \"\\w+\"\n\n# TODO: Will not work if \"=\" in the password, even if it is escaped.\n# Should reasonably contain more than four or five chars.\nrgxPassword = \".+\"\n\n# Hexadecimal number\nrgxHexa = \"[0-9a-fA-F]+\"\n\nrgxNumber = \"\\d+\"\n\nrgxAlpha = \"[a-zA-Z]+\"\n\n# Unfortunately it is not able to filter escaped equal signs.\nrgxAnything = \"[^=]+\"\n\nrgxFileName = rgxAnything\n\n# The individual regular expressions do not contain the pipe character,\n# because it is already used between each regular expression.\n\n# https://www.connectionstrings.com/oracle/\n\nmapRgxODBC = {\n\t\"ALLOWBATCH\" : rgxTrueFalse, 
#\n\t\"ALLOWUSERVARIABLES\" : rgxTrueFalse, #\n\t\"ALLOWZERODATETIME\" : rgxTrueFalse, #\n\t\"AUTOENLIST\" : rgxTrueFalse, #\n\t\"CACHESERVERPROPERTIES\" : rgxTrueFalse, #\n\t\"CACHETYPE\" : rgxAlpha, # \"File\"\n\t\"CERTIFICATE STORE LOCATION\" : rgxUser, # \"CurrentUser\"\n\t\"CERTIFICATE THUMBPRINT\" : rgxHexa, # \"479436009a40f3017a145cf8479e7694d7aadef0\"\n\t\"CERTIFICATEFILE\" : rgxFileName, # \"C:\\folder\\client.pfx\"\n\t\"CERTIFICATEPASSWORD\" : rgxPassword,\n\t\"CHARSET\" : \"\\w+\", # \"utf8\"\n\t\"CHECKPARAMETERS\" : rgxTrueFalse, #\n\t\"COMMAND LOGGING\" : rgxTrueFalse, #\n\t\"CONNECTION ?LIFETIME\" : rgxNumber, # \" \" character is optional.\n\t\"CONNECTION TIMEOUT\" : rgxNumber, #\n\t\"CONNECTIONRESET\" : rgxTrueFalse, #\n\t\"CONVERTZERODATETIME\" : rgxTrueFalse, #\n\t\"DATA SOURCE\" : \"[a-zA-Z_0-9\\\\/]+\", # \"C:\\myFolder\\myAccessFile.accdb\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # \"|DataDirectory|\\myAccessFile.accdb\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # \"\\\\server\\share\\folder\\myAccessFile.accdb\"\n\t\"DATABASE\" : \"[ a-zA-Z0-9._]+\",\n\t\"DB\" : \"[a-zA-Z0-9._]*\",\n\t\"DBA PRIVILEGE\" : rgxAnything, # \"SYSDBA\", \"SYSOPER\"\n\t\"DBQ\" : rgxAnything, # \"C:\\mydatabase.accdb\", \"111.21.31.99:1521/XE\", \"myTNSServiceName\"\n\t\"DECR POOL SIZE\" : rgxNumber, #\n\t\"DEFAULT COMMAND TIMEOUT\" : rgxNumber, #\n\t\"DEFAULTTABLECACHEAGE\" : rgxNumber, #\n\t\"DRIVER\" : \"\\{[^}]*\\}\", # \"{Microsoft Access Driver (*.mdb, *.accdb)}\"\n\t\"DSN\" : \"\\w+\", # \"MY_DSN_ORA12\"\n\t\"ENCRYPT\" : rgxTrueFalse, # \"true\"\n\t\"EXCLUSIVE\" : rgxTrueFalse, # \"1\"\n\t\"EXTENDEDANSISQL\" : rgxTrueFalse, # \"1\"\n\t\"EXTENDED PROPERTIES\" : rgxAnything, #\n\t\"FILEDSN\" : rgxFileName,\n\t\"IGNORE PREPARE\" : rgxTrueFalse, #\n\t\"INCR POOL SIZE\" : rgxNumber, #\n\t\"INITIAL CATALOG\" : rgxAnything, # \"myDataBase\"\n\t\"INTEGRATEDSECURITY\" : rgxTrueFalse, #\n\t\"JET OLEDB:DATABASE PASSWORD\" : rgxPassword,\n\t\"KEEPALIVE\" : rgxNumber, #\n\t\"LOCALE IDENTIFIER\" : \"\\d+\", # \"2057\" is en-gb locale identifier\n\t\"LOAD BALANCING\" : rgxTrueFalse, #\n\t\"MAX POOL SIZE\" : rgxNumber, #\n\t\"MIN POOL SIZE\" : rgxNumber, #\n\t\"MAXIMUMPOOLSIZE\" : rgxNumber, #\n\t\"MINIMUMPOOLSIZE\" : rgxNumber, #\n\t\"MODE\" : \"[a-zA-Z ]+\", # \"Share Exclusive\"\n\t\"ODBCKEY[12]\" : rgxAnything,\n\t\"OLDGUIDS\" : rgxTrueFalse, #\n\t\"OLEDBKEY[12]\" : rgxAnything,\n\t\"OPTION\" : rgxAnything, #\n\t\"OSAUTHENT\" : rgxTrueFalse, # \"1\"\n\t\"PASSWORD\" : rgxPassword,\n\t\"PERSIST SECURITY INFO\" : rgxTrueFalse,\n\t\"PIPENAME\" : \"\\w+\", # If \"Protocol\" = \"pipe\".\n\t\"POOLING\" : rgxTrueFalse, #\n\t\"PORT\" : \"\\d+\", # TODO: Five numbers or less.\n\t\"PROCEDURECACHESIZE\" : rgxNumber, #\n\t\"PROTOCOL\" : \"\\w+\", # \"socket|memory|pipe\"\n\t\"PROVIDER\" : \"[ a-zA-Z0-9._]+\", # \"Microsoft.ACE.OLEDB.12.0\"\n\t\"PWD\" : rgxPassword,\n\t\"REMOTE SERVER\" : rgxAnything, # \"http://server.adress.com\"\n\t\"SERVER\" : \"[- a-zA-Z0-9\\._\\\\\\]+\", # \"serverAddress1, serverAddress2, serverAddress3\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # This Oracle omission of tnsnames.ora is not taken into account.\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # \"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=MyHost)(PORT=MyPort))(CONNECT_DATA=(SERVICE_NAME=MyOracleSID)))\"\n\t\"SHARED MEMORY NAME\" : \"\\w+\", # \"MYSQL\" for example. 
If \"Protocol\" = \"memory\".\n\t\"SOCKET\" : rgxAnything, #\n\t\"SQLSERVERMODE\" : rgxTrueFalse, #\n\t\"SSLCERT\" : rgxFileName, # \"c:\\client-cert.pem\"\n\t\"SSLKEY\" : rgxFileName, # \"c:\\client-key.pem\"\n\t\"SSLMODE\" : \"\\w+\", # \"Preferred|Required\"\n\t\"SSLVERIFY\" : rgxTrueFalse, # \"1\"\n\t\"STMT\" : rgxAnything, #\n\t\"SYSTEMDB\" : rgxAnything, # \"C:\\mydatabase.mdw\"\n\t\"TABLECACHE\" : rgxTrueFalse, #\n\t\"TRUSTED_CONNECTION\" : rgxTrueFalse,\n\t\"UID\" : rgxUser,\n\t\"USEAFFECTEDROWS\" : rgxTrueFalse, #\n\t\"USECOMPRESSION\" : rgxTrueFalse, #\n\t\"USER\" : rgxUser,\n\t\"USER ID\" : rgxUser,\n\t\"USEPERFORMANCEMONITOR\" : rgxTrueFalse, #\n\t\"USEPROCEDUREBODIES\" : rgxTrueFalse, #\n\t\"USEUSAGEADVISOR\" : rgxTrueFalse, #\n}\n\n# Keys which are specifically coded as passwords.\n# Should be [\"PWD\",\"PASSWORD\",\"JET OLEDB:DATABASE PASSWORD\"]\nodbcKeysConfidential = [keyWrd for keyWrd in mapRgxODBC if mapRgxODBC[keyWrd] == rgxPassword ]\n\n# Values which do not need to be encoded, making things easier to understand.\nodbcKeysUncoded = [keyWrd for keyWrd in mapRgxODBC if mapRgxODBC[keyWrd] in [rgxAlpha,rgxUser,rgxTrueFalse,rgxHexa,rgxNumber] ]\n\n# This contains the most often used keys in DSN connection strings.\nrestrictRgxODBCKeys = [\n\t\"DATA SOURCE\",\n\t\"DATABASE\",\n\t\"DB\",\n\t\"DRIVER\",\n\t\"DSN\",\n\t\"PROTOCOL\",\n\t\"PROVIDER\",\n\t\"PWD\",\n\t\"REMOTE SERVER\",\n\t\"SERVER\",\n\t\"SHARED MEMORY NAME\",\n\t\"SOCKET\",\n\t\"SQLSERVERMODE\",\n\t\"TRUSTED_CONNECTION\",\n\t\"UID\",\n\t\"USER\",\n\t\"USER ID\",\n]\n\n# Only the commonest parameters, to make memory scans faster.\nmapRgxODBC_Light = { key : mapRgxODBC[key] for key in restrictRgxODBCKeys }\n\n# This need a to be not-too-reserved character.\ndelimiterConnectionStringODBC = \"/\" # \"-\"\n\n# This behaves like a string plus some properties for serialization.\n# So it can be used as a keyword for encoding parameters in the id of an object,\n# but also it contains serialization methods.\n# Therefore, it can be mixed with plain string keywords, which is the most common case.\nclass CgiPropertyDsn(str):\n\t# Python 2\n\tdef __new__(cls):\n\t\treturn super(CgiPropertyDsn, cls).__new__(cls, \"Dsn\")\n\n\t#def __new__(self):\n\t#\tobj = str.__new__(cls, \"Dsn\")\n\t#\treturn obj\n\n\tdef SplitPlain(connectStrClear):\n\t\treturn re.split( \" *; *\", connectStrClear )\n\n\tdef ValueEncode(self,connectStrClear):\n\t\t# sys.stderr.write(\"ValueEncode connectStrClear=%s\\n\"%connectStrClear)\n\t\tvecKeywrd = re.split( \" *; *\", connectStrClear )\n\n\t\tdef KeyValuePairEncode(kvPair):\n\t\t\t( aKeyWrd,aVal ) = re.split( \" *= *\", kvPair )\n\t\t\t# sys.stderr.write(\"KeyValuePairEncode aKeyWrd=%s\\n\"%aKeyWrd)\n\t\t\tif aKeyWrd in odbcKeysConfidential:\n\t\t\t\t# aVal = lib_util.EncodeUri(aVal) # SHOULD BE CRYPTED\n\t\t\t\taVal = cgi.escape(aVal) # SHOULD BE CRYPTED\n\t\t\telif aKeyWrd not in odbcKeysUncoded:\n\t\t\t\taVal = cgi.escape(aVal)\n\t\t\treturn aKeyWrd.upper() + \"~\" + aVal\n\n\t\t# return \"-\".join( KeyValuePairEncode(aKeyW.upper(),vecKeywrd[aKeyW]) for aKeyW in vecKeywrd )\n\n\t\t# Cannot use \"-\" as it is accepted in server names.\n\t\treturn delimiterConnectionStringODBC.join( KeyValuePairEncode(kvPair) for kvPair in vecKeywrd )\n\n\tdef ValueDecode(self,connectStrCoded):\n\t\t# sys.stderr.write(\"ValueDecode connectStrCoded=%s\\n\"%connectStrCoded)\n\n\t\t# PROBLEM \"SERVER=\\RCHATEAU-HP\"\n\t\t# SERVER=\\\\RCHATEAU;Key=Cannot decode:HP\n\n\t\tvecTokPairs = re.split( 
delimiterConnectionStringODBC, connectStrCoded )\n\n\t\tdef TokenDecode(aTok):\n\t\t\t# sys.stderr.write(\"TokenDecode aTok=%s\\n\"%aTok)\n\n\t\t\t# DecodeUri inverse de EncodeUri mais ca n existe pas !!!!\n\t\t\tdef TokenLocalDecode(aVal):\n\t\t\t\treturn aVal\n\n\t\t\ttry:\n\t\t\t\t(aKeyWrd,aVal) = aTok.split(\"~\")\n\t\t\texcept ValueError:\n\t\t\t\treturn \"Key=Cannot decode:\"+str(aTok)\n\n\t\t\tif aKeyWrd in odbcKeysConfidential:\n\t\t\t\taVal = TokenLocalDecode(aVal) # SHOULD BE CRYPTED\n\t\t\telif aKeyWrd not in odbcKeysUncoded:\n\t\t\t\taVal = TokenLocalDecode(aVal)\n\t\t\t# sys.stderr.write(\"TokenDecode aVal=%s\\n\"%aVal)\n\t\t\treturn aKeyWrd + \"=\" + aVal\n\n\t\treturn \";\".join( TokenDecode(aTok) for aTok in vecTokPairs )\n\n\t# Same thing as displaying but the password must be hidden.\n\tdef ValueDisplay(self,connectStrCoded):\n\t\tconnectStrClear = self.ValueDecode(connectStrCoded)\n\t\tconnectStrHidden = connectStrClear\n\t\tconnectStrHidden = re.sub(\"PWD=[^;]+\",\"PWD=xxxxxxx\", connectStrHidden,re.IGNORECASE)\n\t\tconnectStrHidden = re.sub(\"PASSWORD=[^;]+\",\"PASSWORD=xxxxxxx\", connectStrHidden,re.IGNORECASE)\n\t\treturn connectStrHidden\n\n\t# This must be very fast because used in loops.\n\t# It abbreviates the DSN especially if this is a connection string.\n\tdef ValueShortDisplay(self,connectStrCoded):\n\t\tconnectStrClear = self.ValueDecode(connectStrCoded)\n\t\t# sys.stderr.write(\"ValueShortDisplay connectStrCoded=%s connectStrClear=%s\\n\"%(connectStrCoded,connectStrClear))\n\t\tmtchDsn = re.match(\".*DSN=([^;]+).*\",connectStrClear,re.IGNORECASE)\n\t\tif mtchDsn:\n\t\t\treturn mtchDsn.group(1)\n\t\tmtchDsn = re.match(\".*SERVER=([^;]+).*\",connectStrClear,re.IGNORECASE)\n\t\tif mtchDsn:\n\t\t\treturn mtchDsn.group(1)\n\t\treturn connectStrClear\n\n\n", "sub_path": "survol/sources_types/odbc/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 12627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.split", "line_number": 222, "usage_type": "call"}, {"api_name": "re.split", "line_number": 226, "usage_type": "call"}, {"api_name": "re.split", "line_number": 229, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 233, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 235, "usage_type": "call"}, {"api_name": "re.split", "line_number": 249, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 276, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 276, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 277, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 277, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 285, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 285, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 288, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 288, "usage_type": "attribute"}]} +{"seq_id": "272773372", "text": "from flask import Flask, jsonify, request\n\nfrom .models import *\nfrom .mongo_setup import global_init\n\napp = Flask(__name__)\n\nglobal_init()\n\n### user ###\n\n@app.route('/user')\ndef get_users():\n\tuser_data = User.get_all()\n\treturn jsonify({'users': user_data})\n\n@app.route('/user', methods=['POST'])\ndef add_user():\n\tdata = request.get_json()\n\tuser = 
User(email=data['email'],\n\t\t\t\tname=data['name'],\n\t\t\t\tpassword=data['password'],\n\t\t\t\tuser_type=data['user_type'])\n\tuser.save()\n\treturn jsonify({'new_user': user.json()})\n\n@app.route('/user/<user_id>', methods=['GET'])\ndef get_user(user_id):\n\tuser = User.objects(id=user_id).get()\n\treturn jsonify({'user': user.json()})\n\n@app.route('/user/<user_id>', methods=['PUT'])\ndef update_user(user_id):\n\tdata = request.get_json()\n\tuser = User.objects(id=user_id).get()\n\tuser.update(\n\t\temail=data['email'],\n\t\tname=data['name'],\n\t\tpassword=data['password'],\n\t\tuser_type=data['user_type'])\n\tuser.reload()\n\treturn jsonify({'updated_user': user.json()})\n\n@app.route('/user/<user_id>', methods=['DELETE'])\ndef delete_user(user_id):\n\tuser = User.objects(id=user_id).get()\n\tuser_name = user.name\n\tuser.delete()\n\treturn jsonify({'deleted_user': user_name})\n\n@app.route('/user/<user_id>/first-course', methods=['GET'])\ndef user_first_course(user_id):\n\tuser = User.objects(id=user_id).get()\n\tfirst_course = User.objects().first()\n\tuser.available_courses.append(first_course)\n\tuser.save()\n\treturn jsonify({'first_course': first_course.name})\n\n@app.route('/user/<user_id>/first-lesson', methods=['GET'])\ndef user_first_course(user_id):\n\tuser = User.objects(id=user_id).get()\n\tfirst_course = Courses.objects().first()\n\tfirst_lesson = first_course.lessons.first()\n\tuser.available_lessons.append(first_lesson)\n\tuser.save()\n\treturn jsonify({'first_lesson': first_lesson.name})\n\n@app.route('/user/<user_id>/available-courses/<course_id>', methods=['GET'])\ndef add_course_to_available_courses(user_id, course_id):\n\tuser = User.objects(id=user_id).get()\n\tcourse = Course.objects(id=course_id).get()\n\tuser.available_courses.append(course)\n\tuser.save()\n\treturn jsonify({\n\t\t'message': f'Course {course.name} added to available courses of {user.name}.'\n\t})\n\n@app.route('/user/<user_id>/available-lessons/<lesson_id>', methods=['GET'])\ndef add_lesson_to_available_lessons(user_id, lesson_id):\n\tuser = User.objects(id=user_id).get()\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tuser.available_lessons.append(lesson)\n\tuser.save()\n\tlast_course = user.available_courses[-1]\n\tlessons_in_last_course = last_course.lessons\n\ttotal_lessons_in_last_course = len(lessons_in_last_course)\n\ttotal_available_lessons = len(user.available_lessons)\n\tif total_available_lessons == total_lessons_in_last_course:\n\t\tdel user.available_lessons[:]\n\t\tnext_course = last_course.next_course_id\n\t\tuser.available_courses.append(next_course)\n\t\tnext_lesson = next_course.lessons[0]\n\t\tuser.available_lessons.append(next_lesson)\n\t\tuser.save()\n\t\treturn jsonify({\n\t\t\t'message': f'New course {next_course.name} and new {next_lesson.name}.'\n\t\t})\n\tnext_lesson = lesson.next_lesson_id\n\tuser.available_lessons.append(next_lesson)\n\tuser.save()\n\treturn jsonify({\n\t\t'message': f'Added lesson {next_lesson.name} to available lessons of {user.name}.'\n\t})\n\n### question ###\n\n@app.route('/question')\ndef get_questions():\n\tquestion_data = Question.get_all()\n\treturn jsonify({'questions': question_data})\n\n@app.route('/question', methods=['POST'])\ndef add_question():\n\tdata = request.get_json()\n\tquestion = Question(text=data['text'],\n\t\t\t\t\t\tdetail=data['detail'],\n\t\t\t\t\t\tscore=data['score'],\n\t\t\t\t\t\tquestion_type=data['question_type'])\n\tquestion.save()\n\treturn jsonify({'new_question': question.json()})\n\n@app.route('/question/<question_id>', methods=['GET'])\ndef get_question(question_id):\n\tquestion = 
Question.objects(id=question_id).get()\n\treturn jsonify({'question': question.json()})\n\n@app.route('/question/<question_id>', methods=['PUT'])\ndef update_question(question_id):\n\tdata = request.get_json()\n\tquestion = Question.objects(id=question_id).get()\n\tquestion.update(\n\t\ttext=data['text'],\n\t\tdetail=data['detail'],\n\t\tscore=data['score'],\n\t\tquestion_type=data['question_type'])\n\tquestion.reload()\n\treturn jsonify({'updated_question': question.json()})\n\n@app.route('/question/<question_id>', methods=['DELETE'])\ndef delete_question(question_id):\n\tquestion = Question.objects(id=question_id).get()\n\tquestion_text = question.text\n\tquestion.delete()\n\treturn jsonify({'deleted_question': question_text})\n\n# POST /question/<question_id>/answers\n@app.route('/question/<question_id>/answers', methods=['POST'])\ndef add_answer_to_question(question_id):\n\tdata = request.get_json()\n\tquestion = Question.objects(id=question_id).get()\n\tanswer = Answer(text=data['text'],\n\t\t\t\t\tcorrect=data['correct'])\n\tquestion.answers.append(answer)\n\tquestion.save()\n\treturn jsonify({\n\t\t'message': f'Answer {answer.text} added to question {question.text}.'\n\t})\n\n### lesson ###\n\n@app.route('/lesson')\ndef get_lessons():\n\tlesson_data = Lesson.get_all()\n\treturn jsonify({'lessons': lesson_data})\n\n@app.route('/lesson', methods=['POST'])\ndef add_lesson():\n\tdata = request.get_json()\n\tlesson = Lesson(name=data['name'],\n\t\t\t\t\tapproval_score=data['approval_score'])\n\tlesson.save()\n\treturn jsonify({'new_lesson': lesson.json()})\n\n@app.route('/lesson/<lesson_id>', methods=['GET'])\ndef get_lesson(lesson_id):\n\tlesson = Lesson.objects(id=lesson_id).get()\n\treturn jsonify({'lesson': lesson.json()})\n\n@app.route('/lesson/<lesson_id>', methods=['PUT'])\ndef update_lesson(lesson_id):\n\tdata = request.get_json()\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tlesson.update(\n\t\tname=data['name'],\n\t\tapproval_score=data['approval_score']\n\t)\n\tlesson.reload()\n\treturn jsonify({'updated_lesson': lesson.json()})\n\n@app.route('/lesson/<lesson_id>', methods=['DELETE'])\ndef delete_lesson(lesson_id):\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tquestions = lesson.questions\n\tfor question in questions:\n\t\tquestion.delete()\n\tlesson_name = lesson.name\n\tlesson.delete()\n\treturn jsonify({'deleted_lesson': lesson_name})\n\n@app.route('/lesson/<lesson_id>/question/<question_id>', methods=['GET'])\ndef add_question_to_lesson(lesson_id, question_id):\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tlesson_name = lesson.name\n\tquestion = Question.objects(id=question_id).get()\n\tquestion_text = question.text\n\tlesson.questions.append(question)\n\tlesson.save()\n\treturn jsonify({\n\t\t'message': f'Question {question.text} added to lesson {lesson.name}.'\n\t})\n\n@app.route('/lesson/<lesson_id>/next-lesson/<next_lesson_id>', methods=['GET'])\ndef add_next_lesson_to_lesson(lesson_id, next_lesson_id):\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tnext_lesson_id = Lesson.objects(id=next_lesson_id).get()\n\tlesson.next_lesson_id = next_lesson_id\n\tlesson.save()\n\treturn jsonify({\n\t\t'message': f'Next lesson {next_lesson_id.name} added to lesson {lesson.name}.'\n\t})\n\n@app.route('/lesson/<lesson_id>/full', methods=['GET'])\ndef get_lesson_full(lesson_id):\n\tlesson = Lesson.objects(id=lesson_id).get()\n\treturn jsonify({'lesson': lesson.json_full()})\n\n### course ###\n\n@app.route('/course')\ndef get_courses():\n\tcourse_data = Course.get_all()\n\treturn jsonify({'courses': course_data})\n\n@app.route('/course', methods=['POST'])\ndef add_course():\n\tdata = request.get_json()\n\tcourse = 
Course(name=data['name'])\n\tcourse.save()\n\treturn jsonify({'new_course': course.json()})\n\n@app.route('/course/<course_id>', methods=['GET'])\ndef get_course(course_id):\n\tcourse = Course.objects(id=course_id).get()\n\treturn jsonify({'course': course.json()})\n\n@app.route('/course/<course_id>', methods=['PUT'])\ndef update_course(course_id):\n\tdata = request.get_json()\n\tcourse = Course.objects(id=course_id).get()\n\tcourse.update(\n\t\tname=data['name']\n\t)\n\tcourse.reload()\n\treturn jsonify({'updated_course': course.json()})\n\n@app.route('/course/<course_id>', methods=['DELETE'])\ndef delete_course(course_id):\n\tcourse = Course.objects(id=course_id).get()\n\tlessons = course.lessons\n\tfor lesson in lessons:\n\t\tlesson.delete()\n\tcourse_name = course.name\n\tcourse.delete()\n\treturn jsonify({'deleted_course': course_name})\n\n@app.route('/course/<course_id>/lesson/<lesson_id>', methods=['GET'])\ndef add_lesson_to_course(course_id, lesson_id):\n\tcourse = Course.objects(id=course_id).get()\n\tcourse_name = course.name\n\tlesson = Lesson.objects(id=lesson_id).get()\n\tlesson_name = lesson.name\n\tcourse.lessons.append(lesson)\n\tcourse.save()\n\treturn jsonify({\n\t\t'message': f'Lesson {lesson.name} added to course {course.name}.'\n\t})\n\n@app.route('/course/<course_id>/next-course/<next_course_id>', methods=['GET'])\ndef add_next_course_to_course(course_id, next_course_id):\n\tcourse = Course.objects(id=course_id).get()\n\tnext_course_id = Course.objects(id=next_course_id).get()\n\tcourse.next_course_id = next_course_id\n\tcourse.save()\n\treturn jsonify({\n\t\t'message': f'Next course {next_course_id.name} added to course {course.name}.'\n\t})\n\nif __name__ == '__main__':\n    app.run(debug=True)", "sub_path": "api/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 8922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "mongo_setup.global_init", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request", 
"line_number": 129, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 232, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 254, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 264, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "484461660", "text": "from config import imagenet_alexnet_config as config\nfrom pyimagesearch.nn.mxconv import MxAlexNet\nimport mxnet as mx\nimport argparse\nimport logging\nimport json\nimport os\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--checkpoints\", required=True, help=\"path to output checkpoint directory\")\nap.add_argument(\"-p\", \"--prefix\", required=True, help=\"name of model prefix\")\nap.add_argument(\"-s\", \"--start-epoch\", type=int, default=0, help=\"epoch to restart training at\")\nargs = vars(ap.parse_args())\n\nlogging.basicConfig(level=logging.DEBUG, filename=\"training_{}.log\".format(args[\"start_epoch\"]), filemode=\"w\")\n\nmeans = json.loads(open(config.DATASET_MEAN).read())\nbatchSize = config.BATCH_SIZE * config.NUM_DEVICES\nprint(config.TRAIN_MX_REC)\ntrainIter = mx.io.ImageRecordIter(\n path_imgrec=config.TRAIN_MX_REC,\n data_shape=(3, 227, 227),\n batch_size=batchSize,\n rand_crop=True,\n rand_mirror=True,\n rotate=15,\n mx_shear_ratio=0.1,\n mean_r=means[\"R\"],\n mean_g=means[\"G\"],\n mean_b=means[\"B\"],\n preprocess_threads=config.NUM_DEVICES * 2\n)\n\nvalIter = mx.io.ImageRecordIter(\n path_imgrec=config.VAL_MX_REC,\n data_shape=(3, 227, 227),\n batch_size=batchSize,\n mean_r=means[\"R\"],\n mean_g=means[\"G\"],\n mean_b=means[\"B\"]\n)\n\nopt = mx.optimizer.SGD(learning_rate=1e-3, momentum=0.9, wd=0.0005, rescale_grad=1.0 / 
batchSize)\n\ncheckpointsPath = os.path.sep.join([args[\"checkpoints\"], args[\"prefix\"]])\nargParams = None\nauxParams = None\n\nif args[\"start_epoch\"] <= 0:\n print(\"[INFO] building network...\")\n model = MxAlexNet.build(config.NUM_CLASSES)\nelse:\n print(\"[INFO] loading epoch {}...\".format(args[\"start_epoch\"]))\n model = mx.model.FeedForward.load(checkpointsPath, args[\"start_epoch\"])\n\n argParams = model.arg_params\n auxParams = model.aux_params\n model = model.symbol\n\nmodel = mx.model.FeedForward(\n ctx=[mx.gpu(0)],\n symbol=model,\n initializer=mx.initializer.Xavier(),\n arg_params=argParams,\n aux_params=auxParams,\n optimizer=opt,\n num_epoch=30,\n begin_epoch=args[\"start_epoch\"]\n)\n\nbatchEndCBs = [mx.callback.Speedometer(batchSize, 500)]\nepochEndCBs = [mx.callback.do_checkpoint(checkpointsPath)]\nmetrics = [mx.metric.Accuracy(), mx.metric.TopKAccuracy(top_k=5), mx.metric.CrossEntropy()]\n\nprint(\"[INFO] training network...\")\nmodel.fit(X=trainIter,\n eval_data=valIter,\n eval_metric=metrics,\n batch_end_callback=batchEndCBs,\n epoch_end_callback=epochEndCBs)", "sub_path": "3.05.0-train_alexnet.py", "file_name": "3.05.0-train_alexnet.py", "file_ext": "py", "file_size_in_byte": 2487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "config.imagenet_alexnet_config.DATASET_MEAN", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 17, "usage_type": "name"}, {"api_name": "config.imagenet_alexnet_config.BATCH_SIZE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 18, "usage_type": "name"}, {"api_name": "config.imagenet_alexnet_config.NUM_DEVICES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config.TRAIN_MX_REC", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 19, "usage_type": "name"}, {"api_name": "mxnet.io.ImageRecordIter", "line_number": 20, "usage_type": "call"}, {"api_name": "mxnet.io", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config.TRAIN_MX_REC", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 21, "usage_type": "name"}, {"api_name": "config.imagenet_alexnet_config.NUM_DEVICES", "line_number": 31, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 31, "usage_type": "name"}, {"api_name": "mxnet.io.ImageRecordIter", "line_number": 34, "usage_type": "call"}, {"api_name": "mxnet.io", "line_number": 34, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config.VAL_MX_REC", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 35, "usage_type": "name"}, {"api_name": "mxnet.optimizer.SGD", "line_number": 43, "usage_type": "call"}, {"api_name": "mxnet.optimizer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": 
"pyimagesearch.nn.mxconv.MxAlexNet.build", "line_number": 51, "usage_type": "call"}, {"api_name": "pyimagesearch.nn.mxconv.MxAlexNet", "line_number": 51, "usage_type": "name"}, {"api_name": "config.imagenet_alexnet_config.NUM_CLASSES", "line_number": 51, "usage_type": "attribute"}, {"api_name": "config.imagenet_alexnet_config", "line_number": 51, "usage_type": "name"}, {"api_name": "mxnet.model.FeedForward.load", "line_number": 54, "usage_type": "call"}, {"api_name": "mxnet.model", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mxnet.model.FeedForward", "line_number": 60, "usage_type": "call"}, {"api_name": "mxnet.model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 61, "usage_type": "call"}, {"api_name": "mxnet.initializer.Xavier", "line_number": 63, "usage_type": "call"}, {"api_name": "mxnet.initializer", "line_number": 63, "usage_type": "attribute"}, {"api_name": "mxnet.callback.Speedometer", "line_number": 71, "usage_type": "call"}, {"api_name": "mxnet.callback", "line_number": 71, "usage_type": "attribute"}, {"api_name": "mxnet.callback.do_checkpoint", "line_number": 72, "usage_type": "call"}, {"api_name": "mxnet.callback", "line_number": 72, "usage_type": "attribute"}, {"api_name": "mxnet.metric.Accuracy", "line_number": 73, "usage_type": "call"}, {"api_name": "mxnet.metric", "line_number": 73, "usage_type": "attribute"}, {"api_name": "mxnet.metric.TopKAccuracy", "line_number": 73, "usage_type": "call"}, {"api_name": "mxnet.metric.CrossEntropy", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "118041615", "text": "import numpy as np\nimport pandas as pd\nimport os\nimport logging\nimport math\nfrom margin_lib import Margin\n\n##############################\n# Setup Logging Configuration\n##############################\nlogger = logging.getLogger(os.path.basename(__file__))\nif not len(logger.handlers):\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s|%(name)s === %(message)s ===', datefmt='%Y-%m-%d %I:%M:%S')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n file_handler = logging.FileHandler('log.txt')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n###############################\n\nclass DeltaMargin(Margin):\n\n def __init__(self):\n Margin.__init__(self, 'Delta')\n\n def net_sensitivities(self, pos, params):\n risk_class = pos.RiskClass.unique()[0]\n\n if risk_class == 'IR':\n factor_group = ['ProductClass', 'RiskType', 'Qualifier', 'Bucket', 'Label1', 'Label2', 'RiskClass']\n elif risk_class in ['CreditQ', 'CreditNonQ']:\n factor_group = ['ProductClass', 'RiskType', 'Qualifier', 'Bucket', 'Label1', 'RiskClass']\n elif risk_class in ['Equity', 'Commodity']:\n factor_group = ['ProductClass', 'RiskType', 'Qualifier', 'Bucket', 'RiskClass']\n elif risk_class == 'FX':\n factor_group = ['ProductClass', 'RiskType', 'Qualifier', 'RiskClass']\n\n pos_gp = pos.groupby(factor_group)\n pos_delta = pos_gp.agg({'AmountUSD': np.sum})\n pos_delta.reset_index(inplace=True)\n\n pos_inflation = pos[pos.RiskType == 'Risk_Inflation'].copy()\n if len(pos_inflation) > 0:\n agg_amount = pos_inflation.AmountUSD.sum() # issue unsolved: should not aggregate different inflation currency together\n pos_inflation = pos_inflation[factor_group].copy()\n pos_inflation.drop_duplicates(inplace=True)\n pos_inflation['AmountUSD'] = agg_amount\n\n pos_delta = pd.concat([pos_delta, 
pos_inflation])\n\n return pos_delta\n\n def find_factor_idx(self, tenor_factor, curve_factor, tenors, curves, risk_class):\n idx = 0\n\n if risk_class == 'IR':\n for tenor in tenors:\n for curve in curves:\n if tenor_factor == tenor and curve_factor == curve:\n return idx\n else:\n idx = idx + 1\n\n elif risk_class in ['CreditQ', 'CreditNonQ']:\n for tenor in tenors:\n if tenor_factor == tenor:\n return idx\n else:\n idx = idx + 1\n\n return -1\n\n def build_risk_factors(self, pos_gp, params):\n\n risk_class = pos_gp.RiskClass.unique()[0]\n\n if risk_class == 'IR':\n pos_inflation = pos_gp[pos_gp.RiskType == 'Risk_Inflation'].copy()\n\n gp_curr = pos_gp.Qualifier.unique()[0]\n\n curve = params.IR_Sub_Curve\n if gp_curr == 'USD':\n curve = params.IR_USD_Sub_Curve\n\n s = np.zeros(len(params.IR_Tenor) * len(curve))\n if len(pos_inflation) > 0:\n s = np.zeros(len(params.IR_Tenor) * len(curve) + 1)\n\n for i, row in pos_gp.iterrows():\n idx = self.find_factor_idx(row['Label1'], row['Label2'], params.IR_Tenor, curve, risk_class)\n if idx >= 0:\n s[idx] = row['AmountUSD']\n\n if len(pos_inflation) > 0:\n s[len(s) - 1] = pos_inflation.AmountUSD\n\n elif risk_class in ['CreditQ', 'CreditNonQ']:\n\n if risk_class == 'CreditQ':\n tenors = params.CreditQ_Tenor\n else:\n tenors = params.CreditNonQ_Tenor\n\n s = np.zeros(pos_gp.Qualifier.nunique() * len(tenors))\n\n for j in range(pos_gp.Qualifier.nunique()):\n pos_gp_qualifier = pos_gp[\n pos_gp.Qualifier == pos_gp.sort_values(['Qualifier']).Qualifier.unique()[j]].copy()\n\n for i, row in pos_gp_qualifier.iterrows():\n idx = self.find_factor_idx(row['Label1'], [], tenors, [], risk_class)\n if idx >= 0:\n s[idx + j * len(tenors)] = row['AmountUSD']\n\n else:\n s = np.zeros(pos_gp.Qualifier.nunique())\n index = 0\n\n for i, row in pos_gp.iterrows():\n s[index] = row['AmountUSD'] #s[i] will go about of bound if input has two comdty bucket\n index = index+1\n return s\n\n def build_risk_weights(self, pos_gp, params):\n risk_class = pos_gp.RiskClass.unique()[0]\n\n if risk_class == 'IR':\n bucket = pd.DataFrame(pos_gp.Bucket.unique(), columns=['curr_type'])\n RW = pd.merge(bucket, params.IR_Weights, left_on=['curr_type'], right_on=['curr'], how='inner')\n RW = RW.drop(['curr_type', 'curr'], axis=1)\n RW = RW.as_matrix()\n\n gp_curr = pos_gp.Qualifier.unique()[0]\n\n curve = params.IR_Sub_Curve\n if gp_curr == 'USD':\n curve = params.IR_USD_Sub_Curve\n\n RW = np.repeat(RW, len(curve))\n\n pos_inflation = pos_gp[pos_gp.RiskType == 'Risk_Inflation'].copy()\n if len(pos_inflation) > 0:\n RW = np.append(RW, params.IR_Inflation_Weights)\n else:\n if risk_class == 'CreditQ':\n weights = params.CreditQ_Weights\n num_factors = len(pos_gp.Qualifier) * len(params.CreditQ_Tenor)\n elif risk_class == 'CreditNonQ':\n weights = params.CreditNonQ_Weights\n num_factors = len(pos_gp.Qualifier) * len(params.CreditNonQ_Tenor)\n elif risk_class == 'Equity':\n weights = params.Equity_Weights\n num_factors = len(pos_gp.Qualifier)\n elif risk_class == 'Commodity':\n weights = params.Commodity_Weights\n num_factors = len(pos_gp.Qualifier)\n elif risk_class == 'FX':\n weights = params.FX_Weights\n num_factors = len(pos_gp.Qualifier)\n\n if risk_class != 'FX':\n bucket = pd.DataFrame(pos_gp.Bucket.unique(), columns=['bucket'])\n RW = pd.merge(bucket, weights, left_on=['bucket'], right_on=['bucket'], how='inner')\n RW = np.array(RW.weight.values[0])\n else:\n RW = np.array([weights])\n\n RW = np.repeat(RW, num_factors)\n\n return RW\n\n def margin_risk_group(self, gp, params):\n\n 
risk_class = gp.RiskClass.unique()[0]\n\n if risk_class in ['IR', 'FX']:\n logger.info('Calculate {0} Delta Margin for {1}'.format(risk_class, gp.Qualifier.unique()))\n else:\n logger.info('Calculate {0} Delta Margin for {1}'.format(risk_class, gp.Bucket.unique()))\n\n s = self.build_risk_factors(gp, params)\n RW = self.build_risk_weights(gp, params)\n CR = self.build_concentration_risk(gp, params)\n\n WS = RW * s * CR\n\n Corr = self.build_in_bucket_correlation(gp, params)\n\n K = np.mat(WS) * np.mat(Corr) * np.mat(np.reshape(WS, (len(WS), 1))) ##not clear here\n K = math.sqrt(K.item(0))\n\n if gp.RiskType.nunique() > 1:\n risk_type = '_'.join(gp.RiskType.unique())\n else:\n risk_type = gp.RiskType.unique()[0]\n\n ret = gp[['ProductClass', 'RiskClass']].copy()\n ret.drop_duplicates(inplace=True)\n ret['RiskType'] = risk_type\n ret['K'] = K\n ret['S'] = max(min(WS.sum(), K), -K)\n\n if risk_class == 'IR':\n ret['CR'] = CR\n else:\n ret['CR'] = CR[0]\n\n if risk_class == 'IR':\n ret['Group'] = gp['Qualifier'].unique()[0]\n elif risk_class == 'FX':\n ret['Group'] = gp['RiskType'].unique()[0]\n else:\n ret['Group'] = gp['Bucket'].unique()[0]\n\n return ret\n\n\n", "sub_path": "delta_margin.py", "file_name": "delta_margin.py", "file_ext": "py", "file_size_in_byte": 8077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "margin_lib.Margin", "line_number": 26, "usage_type": "name"}, {"api_name": "margin_lib.Margin.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "margin_lib.Margin", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.mat", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 195, "usage_type": "call"}, 
{"api_name": "math.sqrt", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "385013269", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport time\nimport json\nfrom datetime import datetime\nimport threading\nimport random\n\nimport requests\nimport redis\nfrom loguru import logger\n\n\n# 是否使用代理(如果为True,则代理按 127.0.0.1:8080-8232;如果为False,则不使用代理)\nproxies = True\n\nliquidation_info_dict = []\nlast_liquidation_info_dict = []\n\n\nclass OkexSpider(object):\n def __init__(self):\n self.redis_connect = redis.Redis(host='47.107.228.85', port=6379, password='20ab20!2#Spider!alxmH')\n self.last_time = 0\n\n def main(self, index):\n logger.info('数字货币:{} {} 强制平仓数据获取开始...'.format(liquidation_info_dict[index]['symbol'], liquidation_info_dict[index]['timeid']))\n\n while True:\n liquidation = liquidation_info_dict[index]\n\n self.symbol = liquidation['symbol']\n self.timeid = liquidation['timeid']\n self.liquidation_url = liquidation['liquidation_url']\n self.redis_key = \"okex:futures:liquidation:{}_{}_forced_liquidation\".format(self.symbol.split(\"-\")[0], self.timeid)\n\n\n try:\n result_list = requests.get(self.liquidation_url, proxies=proxy()).json()\n data_list = result_list[::-1]\n for data in data_list:\n utc_time = data.get(\"created_at\").replace(\"T\", \" \").replace(\"Z\", \"\")\n struct_time = datetime.strptime(utc_time, \"%Y-%m-%d %H:%M:%S.%f\")\n t = int(time.mktime(struct_time.timetuple()))\n if t > self.last_time:\n item = {}\n item[\"Time\"] = t\n item[\"Pair1\"] = self.symbol.split(\"-\")[0]\n item[\"Pair2\"] = self.symbol.split(\"-\")[1]\n item[\"Title\"] = self.timeid\n item[\"Price\"] = float(data['price'])\n item[\"Liquidation\"] = \"Long\" if data['type'] == \"3\" else \"Short\"\n item[\"Volume\"] = int(data['size'])\n item[\"USD\"] = int(data['size']) * 100 if item[\"Pair1\"] == 'BTC' else int(data['size']) * 10\n\n while True:\n try:\n self.redis_connect.lpush(self.redis_key, json.dumps(item))\n break\n except:\n self.redis_connect = redis.Redis(host='47.107.228.85', port=6379, password='20ab20!2#Spider!alxmH')\n\n self.last_time = t\n logger.info(item)\n else:\n continue\n except Exception as e:\n logger.error(e)\n logger.error('数字货币: {}-USD-{} connect ws error, retry...'.format(self.symbol, self.timeid))\n\n\ndef proxy():\n global proxies\n if proxies:\n return {\"https\": \"http://127.0.0.1:{}\".format(random.randint(8080, 8232))}\n else:\n return {\"https\": \"http://127.0.0.1:1080\"}\n\ndef get_liquidation():\n while True:\n try:\n futures_url = \"https://www.okex.com/api/futures/v3/instruments\"\n swap_url = \"https://www.okex.com/api/swap/v3/instruments\"\n\n # proxies = {\"https\": \"http://127.0.0.1:{}\".format(random.randint(8080, 8323))}\n\n futures_list = requests.get(futures_url, proxies=proxy()).json()\n futures_list += requests.get(swap_url, proxies=proxy()).json()\n\n futures_symbol_list = []\n for futures in futures_list:\n if futures.get(\"quote_currency\") == \"USD\":\n item = {}\n item['symbol'] = futures['instrument_id']\n timeid = futures.get('alias')\n if timeid == 'this_week':\n item['timeid'] = 'CW'\n elif timeid == 'next_week':\n item['timeid'] = 'NW'\n elif timeid == 'quarter':\n item['timeid'] = 'CQ'\n elif timeid == 'bi_quarter':\n item['timeid'] = 'NQ'\n else:\n item['timeid'] = 'SWAP'\n futures_symbol_list.append(item)\n\n futures_liquidation_url = \"https://www.okex.com/api/futures/v3/instruments/{}/liquidation?status=1\"\n swap_liquidation_url = \"https://www.okex.com/api/swap/v3/instruments/{}/liquidation?status=1\"\n\n 
liquidation_list = []\n for futures_symbol in futures_symbol_list:\n if futures_symbol['timeid'] == 'SWAP':\n futures_symbol.setdefault('liquidation_url', swap_liquidation_url.format(futures_symbol.get(\"symbol\")))\n else:\n futures_symbol.setdefault('liquidation_url', futures_liquidation_url.format(futures_symbol.get(\"symbol\")))\n # logger.info(futures_symbol)\n liquidation_list.append(futures_symbol)\n\n\n # 获取OKEx 所有合约对的 强平订单信息\n # [{'symbol': 'XRP-USD-200221', 'timeid': 'CW', 'liquidation_url': 'https://www.okex.com/api/futures/v3/instruments/XRP-USD-200221/liquidation?status=1'}]\n # print(liquidation_list)\n # print(len(liquidation_list))\n\n global liquidation_info_dict, last_liquidation_info_dict\n # list\n #_ = [liquidation.setdefault('index', index) for index, liquidation in enumerate(liquidation_list) if \"BTC\" in liquidation['symbol']]\n # liquidation_info_dict = liquidation_list\n\n # dict\n liquidation_info_dict = {index: liquidation for index, liquidation in enumerate(liquidation_list)}\n if last_liquidation_info_dict != liquidation_info_dict:\n last_liquidation_info_dict = liquidation_info_dict\n for liquidation_info in liquidation_info_dict.items():\n logger.info(liquidation_info)\n\n time.sleep(5)\n except Exception as e:\n logger.error(e)\n\nif __name__ == \"__main__\":\n\n # 子线程组\n thread_list = []\n\n t = threading.Thread(target=get_liquidation, args=())\n thread_list.append(t)\n t.start()\n\n while True:\n if liquidation_info_dict:\n for index in liquidation_info_dict:\n spider = OkexSpider()\n t = threading.Thread(target=spider.main, args=(index,))\n thread_list.append(t)\n t.start()\n break\n time.sleep(1)\n\n while True:\n length = len(threading.enumerate())\n logger.info('当前运行的线程数为:%d' % length)\n time.sleep(60)\n if length <= 1:\n break\n\n # 主线程等待子线程执行完毕\n for t in thread_list:\n t.join()\n\n", "sub_path": "liquidation/okex_liquidation.py", "file_name": "okex_liquidation.py", "file_ext": "py", "file_size_in_byte": 6875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "redis.Redis", "line_number": 24, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 28, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 28, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 62, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 65, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 65, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 69, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 69, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 70, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 70, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 76, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 88, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 89, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 137, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 137, 
"usage_type": "name"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}, {"api_name": "loguru.logger.error", "line_number": 141, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 141, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 148, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 156, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "threading.enumerate", "line_number": 163, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 164, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 164, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "570600283", "text": "import requests\nimport cv2\nimport argparse\n\nimport json\nimport traceback\nimport os\nimport uuid\nimport time\n\nprefix = '../ml/'\noutput_path = os.path.join(prefix, 'output/')\n\ncap = cv2.VideoCapture(0)\n\n\"\"\"A simple example of a client using the FaceRecognition application.\n The client can send a video stream (i.e. a serie of images) or a single image as a POST request.\n\n The request has a multipart/form-data type, see for reference:\n https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html\n\n\"\"\"\n\ndef draw(img, result):\n\n\t\"\"\"A function used in a video stream prediction. It draws a rectangle and a textbox with associated label. \n \n\t Args:\n\t Img.\n\t Result - a tuple obtained from the server.\n\n\t \"\"\"\n\n\ttry:\n\n\t\twrite = False\n\n\t\tfor person, c, face in result:\n\n\t\t\tx, y, w, h, yName = face\n\n\t\t\tif c > 0:\n\n\t\t\t\ttext = person + ' ' + str(round(c * 100, 2)) + '%'\n\t\t\t\tcolor = (0, 255, 0)\n\n\t\t\t\tcv2.rectangle(img, (x, y), (x+w, y+h), color, 2)\n\t\t\t\tcv2.putText(img, text, (x, yName), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1)\n\t\t\t\twrite = True\n\n\texcept Exception as e:\n\n\t\ttrc = traceback.format_exc()\n\t\tprint('Exception during drawing: ' + str(e) + '\\n' + trc)\t\n\n\tcv2.imshow('Image', img)\n\n\tif write == True: cv2.imwrite(output_path + str(uuid.uuid4()) + '.jpg', img)\n\ndef send_stream():\n\n\t\"\"\"Send images to the server until 'q' is pressed.\n \"\"\"\n\n\twhile(True):\n\n\t\tret, frame = cap.read()\n\t\t\n\t\tsend_img(frame)\n\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\n\t\t\tbreak\t\n\ndef send_img(path):\n\n\t\"\"\"Send a single image to the server. 
\n \n\t Args:\n\t Path - a path to the image to be sent.\n\t \"\"\"\n\n\tfile = {\n\n\t\t 'image': (path, open(path, 'rb'))\t \n\t}\n\n\tstudents_list = ['User' + str(i) for i in range(1, 32)]\n\n\tdata = {\n\n\t\t 'school_name': 'example_school',\n\t 'students_list': json.dumps(students_list),\n\t 'endpoint': 'rec17'\n\t}\n\n\tapi_url = 'https://52rpf30bb4.execute-api.eu-west-2.amazonaws.com/api/'\n\tresponse = requests.post(api_url, data=data, files=file)\n\t\n\tparsed = json.loads(response.text)\n\tprint(json.dumps(parsed, indent=1, sort_keys=True))\n\nif __name__ == '__main__':\n\t\n\tparser = argparse.ArgumentParser()\t\n\n\tparser.add_argument('mode',\n\t\t\t\t\t\thelp = '[webcam, image]',\n\t\t\t\t\t\tdefault = '-',\n\t\t\t\t\t\tnargs='?',\n\t\t\t\t\t\t)\n\n\tparser.add_argument('image_path',\n\t\t\t\t\t\thelp = 'path to the image',\n\t\t\t\t\t\tdefault = '-',\n\t\t\t\t\t\tnargs='?',\n\t\t\t\t\t\t)\n\n\targs = parser.parse_args()\n\n\tif args.mode == 'webcam':\n\n\t\tcap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) #1280 \n\t\tcap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) #720\n\t\tsend_stream()\n\n\telif args.mode == 'image':\n\n\t\tsend_img(args.image_path)\n", "sub_path": "opt/program/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 48, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 58, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 98, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 100, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 101, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 124, "usage_type": "attribute"}]} +{"seq_id": "575074711", "text": "import argparse\nimport requests\nimport re\nfrom lib.config import Config\nfrom lib.check import check_ip\nfrom lib.logger import log_err, log_msg\n\n\nUPDATE_NAMECHEAP_URL = 'https://dynamicdns.park-your-domain.com/update'\n\n\ndef update_records(domain):\n ip = domain.ip if domain.ip != 'check' else check_ip()\n for host in domain.hosts:\n resp = requests.get(UPDATE_NAMECHEAP_URL, params={\n 'domain': domain.name,\n 'host': host,\n 'password': domain.password,\n 'ip': ip\n }, timeout=1)\n if resp.status_code != 200 or not re.search('0<\\/ErrCount>',resp.text):\n log_err('Namecheap responded to request with status {} and message {}'\n .format(resp.status_code, resp.text))\n else:\n log_msg('Successfully updated IP for {}.{} to {}'.format(host,domain.name,ip))\n\n\nif 
__name__ == '__main__':\n parser = argparse.ArgumentParser(description='Namecheap dynamic DNS updater')\n parser.add_argument('--config', '-c', default='namecheap.cfg',\n help=\"Configuration file path. Default: search in ./, ~/, /etc\")\n args = parser.parse_args()\n\n search_path = ['./', '~/', '/etc']\n\n config = Config(args.config, search_path)\n\n for domain in config.domains:\n update_records(domain)\n", "sub_path": "namecheap-dyndns.py", "file_name": "namecheap-dyndns.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "lib.check.check_ip", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "re.search", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.logger.log_err", "line_number": 22, "usage_type": "call"}, {"api_name": "lib.logger.log_msg", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.config.Config", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "532051451", "text": "#!/usr/bin/env python3\n# A04 decoder logic by Eyes, inspired by Monocle\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport hashlib\nimport binascii\nimport itertools\n\n\ndef decode(msg, key, hashsys, verbose, very_verbose, mutate, mutate_key, block_size):\n asci = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890',.?!@\\/ '~[]{}-_=+<>|$%^&*():;`\\\"\"\n if verbose:\n print(\"Generating block list of size\", block_size)\n bs_list = [''.join(i) for i in itertools.product(asci, repeat=int(block_size))]\n if verbose:\n print(\"Done.\")\n\n msg = msg.replace('\\r', ' ')\n msg = msg.replace('\\n', ' ')\n msg = msg.replace(' ', ' ')\n key = key.replace('\\r', '').replace('\\n', '')\n new_msg = msg.split(' ')\n strlen = len(new_msg)\n swap = True\n decoded = \"\"\n\n try:\n hash = hashlib.new(str(hashsys))\n except ValueError:\n print(\"Error: hash {h} is not supported, defaulting to sha1\".format(h=hashsys))\n hashsys = 'sha1'\n kc = -1\n for n, x in enumerate(new_msg):\n if verbose or very_verbose:\n print(\"Decoding message part {x} [{n1}/{n2}]\".format(x=x, n1=n + 1, n2=len(new_msg)))\n part_decoded = False\n aa = None\n ii = None\n for i in bs_list:\n if n != 0 and n % 2 != 0:\n kc += 1\n mutate_ = mutate_key.split(':')\n\n full_hash = new_msg[n] + new_msg[n - 1]\n hash = hashlib.new(str(hashsys))\n if not mutate:\n hash.update(i.encode() + key.encode())\n else:\n try:\n rndbit = binascii.unhexlify(mutate_[kc].encode('utf-8'))\n except IndexError:\n print(\"Error: Mutator key for part\", n, \"was not supplied\")\n break\n hash.update(i.encode() + rndbit + key.encode())\n if hash.hexdigest() == full_hash:\n if very_verbose:\n print(\"{fh} == {h}, decoded\".format(fh=full_hash, h=hash.hexdigest()))\n decoded += i\n ii = i\n part_decoded = True\n break\n else:\n continue\n else:\n continue\n if part_decoded:\n if verbose or very_verbose:\n print(\"{fh} -> {i}\".format(fh=full_hash, i=ii))\n else:\n if verbose or very_verbose: print(\"No result found for hash, are you using the right key?\")\n # for i in asci:\n #decoded = decoded.replace(i+i, i)\n\n # decoded = decoded.replace(\"/\", \"\")\n return decoded\n\n\nif __name__ == \"__main__\":\n import sys\n if sys.version_info[0] == 2:\n print(\"Python 2 is not supported\")\n sys.exit(1)\n import argparse\n\n parser = 
argparse.ArgumentParser(description='Decodes text using Eyesgulis.')\n parser.add_argument('-m', '--msg', help='message to decode', metavar='MSG', nargs='+')\n parser.add_argument('-k', '--key', help='secret key for decoding', metavar='KEY', nargs='+')\n parser.add_argument('-H', '--hash', default='sha1',\n choices=list(hashlib.algorithms_available),\n help='the hashing mechanism (default: %(default)s)', metavar='MECHANISM')\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='ask for additional output (default: do not)')\n parser.add_argument('-V', '--very-verbose', action='store_true', default=False, help='even more verbose output')\n parser.add_argument('-f', '--file', help='file to open instead of message')\n parser.add_argument('-M', '--mutate', action='store_true', default=False, help='decrypt mutating key')\n parser.add_argument('-K', '--mutate-key', help='mutator key generated by encoder', nargs='+', metavar='MUTATE_KEY',\n default='')\n parser.add_argument('--hashes', help='print available hashes', action='store_true', default=False)\n parser.add_argument('-b', '--block-size', help='define block size', default=2)\n\n args = parser.parse_args()\n\n if args.hashes:\n print('Hashes available:', ' '.join(list(hashlib.algorithms_available)))\n sys.exit(0)\n\n if args.msg is None and args.file is None:\n args.msg = input('Message to decode: ')\n elif args.msg is not None and args.file is None:\n args.msg = ' '.join(args.msg)\n if args.key is None:\n args.key = input('Key for message: ')\n else:\n args.key = ' '.join(args.key)\n if args.file is not None:\n if os.path.exists(args.file):\n with open(args.file, 'r') as f: args.msg = f.read()\n if args.mutate:\n if args.mutate_key is None:\n args.mutate_key = input('Mutator key: ')\n else:\n args.mutate_key = ' '.join(args.mutate_key)\n else:\n args.mutate_key = ''\n print('Decoding...')\n # print(args.mutate_key)\n decoded = decode(args.msg, args.key, args.hash,\n args.verbose, args.very_verbose, args.mutate, args.mutate_key, args.block_size)\n #print('Decoded message:', decode(args.msg, args.key, args.hash, args.verbose, args.very_verbose))\n if decoded == '':\n print(\"Decoder returned nothing. 
Are you using the right key/hashing system?\")\n else:\n print(\"Decoded message:\", decoded)\n", "sub_path": "decoder.py", "file_name": "decoder.py", "file_ext": "py", "file_size_in_byte": 5420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "itertools.product", "line_number": 15, "usage_type": "call"}, {"api_name": "hashlib.new", "line_number": 29, "usage_type": "call"}, {"api_name": "hashlib.new", "line_number": 46, "usage_type": "call"}, {"api_name": "binascii.unhexlify", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 83, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 86, "usage_type": "call"}, {"api_name": "hashlib.algorithms_available", "line_number": 90, "usage_type": "attribute"}, {"api_name": "hashlib.algorithms_available", "line_number": 105, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}]} +{"seq_id": "532625024", "text": "\"\"\"This module contains a set of Bokeh visualization cont \n\nAttributes:\n *\nClasses:\n * \nFunctions:\n * \nTodo:\n * Adding Python Docstrings\n * \n\"\"\"\nfrom LibOM.Tools import *\nfrom LibOM.GuiInterfaces import *\nimport pickle, csv\nimport re, math\n\nfrom bokeh.models import ColumnDataSource\n\n# BokehControler:\n# - At init: Initialize BOKEH ColumnDataSources.\n# - Whenever scoreboard is updated update CDSes.\n\nclass BokehControler:\n\n def __init__(self, ScoreBoard, MakerDictionary):\n # influencers:\n self.SB = ScoreBoard\n self.MD = MakerDictionary\n self.Influencers = None\n self.Boards = None\n self.ActiveInfluencer = None\n self.ActiveBoard = None\n\n def populateInfluencers(self, names, categories,\n stype = 'per_tweet',\n origin={'x': 0, 'y': 0},\n rotation= math.pi / 6):\n Xpoints = list()\n Ypoints = list()\n XSegments = list()\n YSegments = list()\n Colors = list()\n Memes = list()\n Names = list()\n Ntweets = list()\n Offsets = list()\n Sizes = list()\n Scores = list()\n for username in names:\n total = self.SB.get_score_one(username, 'all', stype)\n if not total: continue\n\n sub_scores = self.SB.get_scores(username, categories, stype)\n sub_scores = {k:v for k, v in sub_scores.items() if v}\n #sub_total = reduce(lambda x, y: x + y, [v for v in sub_scores.values() if v], 0.0)\n #sub_scores['making'] = total - sub_total if sub_total < total else 0\n ntweet = self.SB.table[username]['ntweets']\n scores = sub_scores\n scores[-1] = total\n\n sorted_scores = sorted(scores.items(), key=lambda x: x[1])\n memes = [self.MD.get_category_name(x[0]) for x in sorted_scores]\n npoints = len(memes)\n weights = [x[1] for x in sorted_scores]\n\n # rotate, this gets the total score at the head of the list:\n memes = memes[-1:] + memes[:-1]\n weights = weights[-1:] + weights[:-1]\n sizes = [round(w * 100 + 20) for w in weights]\n coordinates = get_spiral_locations(npoints, center=origin, diameters=sizes, teta=rotation)\n xpoints = [coord[0] for coord in coordinates]\n ypoints = [coord[1] for coord in coordinates]\n x_segments = [[origin['x'], x] for x in xpoints]\n y_segments = [[origin['y'], y] for y in ypoints]\n colors = ['olive'] * npoints\n\n Xpoints.extend(xpoints)\n Ypoints.extend(ypoints)\n Colors.extend(colors)\n Memes.extend(memes)\n 
Offsets.extend([x / 1.5 for x in sizes])\n Sizes.extend(sizes)\n Scores.extend(weights)\n XSegments.extend(x_segments)\n YSegments.extend(y_segments)\n Names.extend([username] * npoints)\n Ntweets.extend([ntweet] * npoints)\n\n source = ColumnDataSource(data=dict(\n x=Xpoints,\n y=Ypoints,\n colors=Colors,\n names=Names,\n sizes=Sizes,\n offsets=Offsets,\n memes=Memes,\n xsegments=XSegments,\n ysegments=YSegments ,\n tweets=Ntweets,\n scores=Scores\n ))\n self.Influencers = source\n\n def setActiveInfluencer(self, name, columns):\n indices = [i for i,x in enumerate(self.Influencers.data['names']) if x == name]\n if not indices: return\n\n influencer = BokehControler.getRows(self.Influencers, indices, columns)\n self.ActiveInfluencer = ColumnDataSource(influencer)\n\n def populateBoards(self,categories,\n stype='per_tweet',\n origin={'x': 0, 'y': 0},\n rotation = math.pi / 6):\n Xpoints = list()\n Ypoints = list()\n Colors = list()\n Memes = list()\n Names = list()\n Sizes = list()\n Scores = list()\n Offsets = list()\n Tweets = list()\n categories.append(-1)\n for cat in categories:\n ranks = self.SB.get_rankings_one(category=cat, stype=stype)\n names = [x[0] for x in ranks]\n npoints = len(names)\n scores = [x[1] for x in ranks]\n #sizes = [20 for w in scores]\n sizes = [round(w * 50 + 10) for w in scores]\n offsets = [x / 1.8 for x in sizes]\n coordinates = get_spiral_locations(npoints, center=origin, diameters=sizes, teta=rotation)\n colors = ['turquoise'] * npoints\n memes = [self.MD.get_category_name(cat)] * npoints\n tweets = [self.SB.table[name]['ntweets'] for name in names]\n\n Names.extend(names)\n Scores.extend(scores)\n Sizes.extend(sizes)\n Xpoints.extend([coord[0] for coord in coordinates])\n Ypoints.extend([coord[1] for coord in coordinates])\n Colors.extend(colors)\n Memes.extend(memes)\n Offsets.extend(offsets)\n Tweets.extend(tweets)\n\n source = ColumnDataSource(data=dict(\n x=Xpoints,\n y=Ypoints,\n colors=Colors,\n names=Names,\n sizes=Sizes,\n offsets=Offsets,\n memes=Memes,\n scores=Scores,\n tweets=Tweets\n ))\n self.Boards = source\n\n def setActiveBoard(self, meme, columns):\n indices = [i for i,x in enumerate(self.Boards.data['memes']) if x == meme]\n #print(\"board: \", meme, columns, indices)\n if not indices: return\n board = BokehControler.getRows(self.Boards,indices,columns)\n self.ActiveBoard = ColumnDataSource(board)\n\n @staticmethod\n def getRows(CDS, indices, columns):\n \"\"\"Returns a slice of a given ColumnDataSource.\n\n Args:\n CDS (ColumnDataSource) : Base data source.\n indices (list): The list of row indices (int) to be sliced out.\n columns (list): The list of column names (string) to be sliced out\n Returns:\n Dict: The new column data source in python dictionary format.\n \"\"\"\n slice = dict()\n for column in columns:\n data = [CDS.data[column][i] for i in indices]\n slice[column] = data\n return slice\n\n#if __naain__': passme__ == '__m\n", "sub_path": "Spirometer/LibOM/GuiControlers.py", "file_name": "GuiControlers.py", "file_ext": "py", "file_size_in_byte": 6484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "math.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 90, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 110, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 115, "usage_type": "attribute"}, {"api_name": "bokeh.models.ColumnDataSource", 
"line_number": 149, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "239659805", "text": "import numpy as np\nfrom sklearn.svm import SVC\n\ndef fit(X, y):\n classifiers = []\n for i in range(y.shape[1]):\n classifier = SVC()\n classifier.fit(X, y[:,i])\n classifiers.append(classifier)\n X = np.hstack([X, (y[:,i]).reshape(X.shape[0],1)])\n return classifiers\n\ndef predict(classifiers, X):\n predictions = []\n for classifier in classifiers:\n pred = np.array(classifier.predict(X)).T\n predictions.append(pred)\n X = np.hstack([X, pred.reshape(X.shape[0],1)])\n return predictions\n\n \n \n\n\n\n\n\n ", "sub_path": "cc_svm.py", "file_name": "cc_svm.py", "file_ext": "py", "file_size_in_byte": 564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sklearn.svm.SVC", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "577086197", "text": "import requests\nfrom connectDB import cursor_datas\nfrom bs4 import BeautifulSoup, UnicodeDammit\nimport os\nimport re\nimport random\nfrom random import choice\nimport pymysql\nimport sys\nsys.path.append('../')\n\n\ndef open_url(url):\n # heads = {}\n # heads['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\n # req = requests.get(url, headers=heads).content\n # return req\n UserAgent = [\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',\n 'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',\n 'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',\n 'Opera/9.27 (Windows NT 5.2; U; zh-cn)',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',\n 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A93 Safari/419.3',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1 Safari/525.13'\n ]\n user_agent = choice(UserAgent)\n headers = {'User-Agent': user_agent}\n html = requests.get(url, headers=headers).content\n return html\n\n\ndef find_href(req):\n soup = BeautifulSoup(req, 'lxml')\n hreflist = []\n for each in soup.find_all('tr', class_='tr3 t_one'):\n\n links = each.a.get('href')\n # r = r'(htm_data/[^\"]+\\.html)'\n r = r'^htm_data/.+?\\.html$'\n R = re.findall(r, links)\n for i in R:\n href = \"http://1024.chxdoa.pw/pw/\" + i\n if href:\n hreflist.append(href)\n return hreflist\n\n\ndef find_img(hreflist):\n print(len(hreflist))\n #datalist = []\n titlelist = []\n # dbname = sex_picture\n db_name = 'sex_picture'\n for i in hreflist:\n html = open_url(i)\n srclist = []\n soup = BeautifulSoup(html, 'lxml')\n titles = soup.find('h1').get_text().strip()\n title = titles.replace('[','').replace(']','')\n #cursor.execute(\"DROP TABLE IF EXISTS `\"+title+\"`\")\n sql = \"\"\"CREATE TABLE IF 
NOT EXISTS `\"\"\"+title+\"\"\"`(\n ID INT NOT NULL AUTO_INCREMENT PRIMARY KEY,\n href CHAR(100) NOT NULL,\n likenumber INT,\n collect INT,\n see INT\n )\"\"\"\n \n cursor_datas(sql,db_name)\n \n #strip去除多余的空格(\\n)\n titlelist.append(title.strip())\n for each in soup.select('div[class=f14] > img'): \n src = each.get('src').strip()\n srclist.append(src)\n for e in srclist:\n \n data = \"INSERT IGNORE INTO `\"+title+\"`(`href`,`likenumber`,`collect`,`see`) VALUES ('\"+str(e)+\"',0,0,0);\"\n \n cursor_datas(data,db_name)\n \n \n\nif __name__ == '__main__':\n \n url = 'http://1024.chxdoa.pw/pw/thread.php?fid=14'\n find_img(find_href(open_url(url)))\n", "sub_path": "picture-vue/getpicturelist.py", "file_name": "getpicturelist.py", "file_ext": "py", "file_size_in_byte": 3344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 63, "usage_type": "call"}, {"api_name": "connectDB.cursor_datas", "line_number": 75, "usage_type": "call"}, {"api_name": "connectDB.cursor_datas", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "96178259", "text": "__author__ = 'Hk4Fun'\n__date__ = '2018/5/5 11:15'\n\nimport hashlib\nimport random\nimport struct\nimport argparse\nimport sys\nimport asyncio\n\nfrom chap_exception import *\n\n# Header = Code (1 Byte) + Identifier (1 Byte ) + Length (2 Byte )\nheader_len = 4\n\n# Constants used in the protocol fields\nCHALLENGE_CODE = 0x01\nRESPONSE_CODE = 0x02\nSUCCESS_CODE = 0x03\nFAILURE_CODE = 0x04\nBIND_REQUEST_CODE = 0x05\nBIND_RESPONSE_CODE = 0x06\nCONNECT_REQUEST_CODE = 0x07\nCONNECT_RESPONSE_CODE = 0x08\nDATA_CODE = 0x09\nDISCONNECT_CODE = 0x0a\n\n\nclass base_chap:\n def __init__(self, loop):\n self.connect_id = set()\n self.loop = loop\n self.reader = None\n self.writer = None\n self.identifier = None\n\n @classmethod\n def check_port(cls, port):\n port = int(port)\n if not 0 <= port <= 65535:\n raise argparse.ArgumentTypeError('port should be range(0, 65536)')\n return port\n\n def check_identifier(func):\n def wrapper(*args, **kwargs):\n self, packet = args\n if packet['identifier'] != self.identifier:\n raise IdentifierException(packet['identifier'])\n return func(*args, **kwargs)\n\n return wrapper\n\n def check_code(code):\n def decorator(func):\n def wrapper(*args, **kwargs):\n _, packet = args\n if packet['code'] != code:\n raise ProtocolException(packet['code'])\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n def send_packet(self, packet):\n self.writer.write(packet)\n\n async def receive_packet(self):\n try:\n header = await self.reader.readexactly(header_len)\n except asyncio.streams.IncompleteReadError:\n return\n else:\n if header == '': raise RuntimeError(\"socket connection broken\")\n (code, identifier, length) = struct.unpack('!BBH', header)\n\n packet = header\n chunk = await self.reader.readexactly(length - header_len)\n if chunk == '': raise RuntimeError(\"socket connection broken\")\n packet = packet + chunk\n\n (code, identifier, length, data) = struct.unpack('!BBH' + str(length 
- header_len) + 's', packet)\n return {'code': code,\n 'identifier': identifier,\n 'length': length,\n 'data': data}\n\n def create_protocol_packet(self, code, data):\n data_len = len(data)\n packet_len = header_len + data_len\n\n # Packing format:\n # ! ==> use network byte order\n # B ==> encode as a C unsigned char (8 bit character == octect)\n # s ==> encode as a string character (in particular NNs => encode NN characters)\n\n pack_format = '!BBH' + str(data_len) + 's'\n\n if isinstance(data, str):\n data = data.encode()\n\n return struct.pack(pack_format, code, self.identifier, packet_len, data)\n\n def send_data(self, data, connect_id):\n code = DATA_CODE\n data = connect_id.encode() + b'#' + data\n self.send_packet(self.create_protocol_packet(code, data))\n\n @check_code(DATA_CODE)\n @check_identifier\n def parse_data(self, packet):\n connect_id, data = packet['data'].split(b'#', 1)\n connect_id = connect_id.decode()\n if connect_id not in self.connect_id:\n raise ConnectIdException(connect_id)\n print('Data from connect_id ', connect_id)\n print('Data:', data)\n return connect_id, data\n\n def send_disconnect(self, connect_id):\n code = DISCONNECT_CODE\n data = connect_id\n self.send_packet(self.create_protocol_packet(code, data))\n\n @check_code(DISCONNECT_CODE)\n @check_identifier\n def parse_disconnect(self, packet):\n connect_id = packet['data'].decode()\n if connect_id not in self.connect_id:\n raise ConnectIdException(connect_id)\n print('Closing connection connect_id:', connect_id)\n return connect_id\n\n\nclass peer(base_chap):\n def __init__(self, args, loop):\n super().__init__(loop)\n chap_socket = args.chap_socket.split(':', 1)\n self.authenticator, self.port = chap_socket[0], base_chap.check_port(chap_socket[1])\n self.identity, self.secret = args.user_pwd.split(':', 1)\n self.remote_port = args.port\n self.loop.run_until_complete(self.connect())\n self.loop.run_until_complete(self.handshake())\n\n async def connect(self):\n wait_time = 2\n while True: # Automatic reconnection after failed connection\n try:\n self.reader, self.writer = await asyncio.open_connection(self.authenticator,\n self.port, loop=self.loop)\n break\n except ConnectionRefusedError:\n print('Can not connect {}:{}, try again......'.format(self.authenticator, self.port))\n asyncio.sleep(wait_time)\n\n def parse_challenge(self, packet):\n if packet['code'] != CHALLENGE_CODE:\n raise ProtocolException(packet['code'])\n self.identifier = packet['identifier']\n challenge_len = struct.unpack('!B', bytes((packet['data'][0],)))[0]\n self.challenge = packet['data'][1:challenge_len + 1]\n print(\"Processing challenge with identifier:\", packet['identifier'])\n\n def send_response(self):\n response_value = hashlib.sha1((chr(self.identifier) + self.secret + str(self.challenge)).encode()).digest()\n response_value_size = struct.pack('!B', len(response_value))\n code = RESPONSE_CODE\n data = response_value_size + response_value + self.identity.encode()\n print(\"Creating response with identifier:\", self.identifier)\n self.send_packet(self.create_protocol_packet(code, data))\n\n @base_chap.check_identifier\n def parse_result(self, packet):\n if packet['code'] != SUCCESS_CODE and packet['code'] != FAILURE_CODE:\n raise ProtocolException(packet['code'])\n if packet['code'] == SUCCESS_CODE:\n print(\"Successfully authenticated!\")\n elif packet['code'] == FAILURE_CODE:\n print(\"Could not authenticate. 
Reason from the authenticator:\", packet['data'].decode())\n raise VarifyError()\n\n def send_bind_request(self):\n print(\"Start negotiate Remote Listen's port\", self.remote_port)\n code, data = BIND_REQUEST_CODE, str(self.remote_port)\n self.send_packet(self.create_protocol_packet(code, data))\n\n @base_chap.check_code(BIND_RESPONSE_CODE)\n @base_chap.check_identifier\n def parse_bind_response(self, packet):\n print('Remote Listen is listening at', int(packet['data']))\n\n async def handshake(self):\n try:\n self.parse_challenge(await self.receive_packet())\n self.send_response()\n\n self.parse_result(await self.receive_packet())\n self.send_bind_request()\n\n self.parse_bind_response(await self.receive_packet())\n\n except ChapError as e:\n print(e)\n self.writer.close()\n sys.exit(1)\n\n @base_chap.check_code(CONNECT_REQUEST_CODE)\n @base_chap.check_identifier\n def parse_connect_request(self, packet):\n request_id = packet['data'].decode()\n print('New connect from Remote Client, request_id ', request_id)\n return request_id\n\n def send_connect_response(self, request_id, result):\n code = CONNECT_RESPONSE_CODE\n connect_id = '0'\n if result: connect_id = self._generate_connect_id()\n data = request_id + '#' + ['0', '1'][result] + '#' + connect_id\n self.send_packet(self.create_protocol_packet(code, data))\n return connect_id\n\n def _generate_connect_id(self):\n id = str(random.randint(0, 1000000))\n while id in self.connect_id:\n id = str(random.randint(0, 1000000))\n self.connect_id.add(id)\n return id\n\n\nclass authenticator(base_chap):\n def __init__(self, args, loop):\n super().__init__(loop)\n self.port = args.port\n self.user_list = self._make_user_list(args)\n self.request_id = set()\n self.start_server()\n\n def _make_user_list(self, args):\n user_list = {}\n for user in args.user_pwd.split(','):\n identity, secret = user.split(':', 1)\n user_list[identity] = secret\n return user_list\n\n def start_server(self):\n coro = asyncio.start_server(self.handle_connect, '0.0.0.0', self.port, loop=self.loop)\n self.server = self.loop.run_until_complete(coro)\n print('Listening at 0.0.0.0:{}......'.format(self.port))\n self.loop.run_forever()\n\n async def handle_connect(self, reader, writer):\n self.reader, self.writer = reader, writer\n peer_host, peer_port, = writer.get_extra_info('peername')\n print('Connection from: {}:{}'.format(peer_host, peer_port))\n self.server.close() # only one chap connection at a time\n await self.handshake()\n self.loop.stop() # handshake over\n\n def send_challenge(self):\n self.identifier = random.randint(0, 255)\n # Create some random challenge, using the hash of a string\n # composed of 60 random integer number in the range\n # [1,100000000]\n self.challenge = hashlib.sha1(''.join(map(str, random.sample(range(10000000), 60))).encode()).digest()\n challenge_size = struct.pack('!B', len(self.challenge))\n code = CHALLENGE_CODE\n data = challenge_size + self.challenge\n print(\"Creating challenge with identifier:\", self.identifier)\n self.send_packet(self.create_protocol_packet(code, data))\n\n @base_chap.check_code(RESPONSE_CODE)\n @base_chap.check_identifier\n def parse_response(self, packet):\n response_len = struct.unpack('!B', bytes((packet['data'][0],)))[0]\n self.response = packet['data'][1:response_len + 1]\n self.identity = packet['data'][response_len + 1:]\n print(\"Processing response with identifier:\", packet['identifier'])\n\n def verify_response(self):\n print(\"Verifying response for identifier:\", self.identifier)\n user_list = 
self.user_list\n identity = self.identity.decode()\n if identity in user_list:\n secret = user_list[identity]\n our_value = hashlib.sha1((chr(self.identifier) + secret + str(self.challenge)).encode()).digest()\n if our_value == self.response:\n return True\n return False\n\n def send_result(self, valid):\n if valid:\n code = SUCCESS_CODE\n data = ''\n print('Verify successfully!')\n else:\n code = FAILURE_CODE\n data = 'Identity or secret is incorrect'\n self.send_packet(self.create_protocol_packet(code, data))\n\n @base_chap.check_code(BIND_REQUEST_CODE)\n @base_chap.check_identifier\n def parse_bind_request(self, packet):\n self.bind_port = int(packet['data'])\n\n def send_bind_response(self):\n if self.bind_port == 0:\n self.bind_port = random.randint(1025, 65535)\n code = BIND_RESPONSE_CODE\n data = str(self.bind_port)\n self.send_packet(self.create_protocol_packet(code, data))\n\n async def handshake(self):\n try:\n self.send_challenge()\n self.parse_response(await self.receive_packet())\n valid = self.verify_response()\n\n self.send_result(valid)\n if not valid: raise VarifyError()\n self.parse_bind_request(await self.receive_packet())\n\n self.send_bind_response()\n except ChapError as e:\n print(e)\n\n def send_connect_request(self):\n code = CONNECT_REQUEST_CODE\n request_id = self._generate_request_id()\n data = request_id\n self.send_packet(self.create_protocol_packet(code, data))\n return request_id\n\n def _generate_request_id(self):\n id = str(random.randint(0, 1000000))\n while id in self.request_id:\n id = str(random.randint(0, 1000000))\n self.request_id.add(id)\n return id\n\n @base_chap.check_code(CONNECT_RESPONSE_CODE)\n @base_chap.check_identifier\n def parse_connect_response(self, packet):\n request_id, result, connect_id = packet['data'].decode().split('#', 2)\n if request_id not in self.request_id:\n raise RequestIdException(request_id)\n result = int(result)\n if result:\n self.connect_id.add(connect_id)\n print('Connect to Local Server successfully! 
Connect_id:', connect_id)\n else:\n print('Connect to Local Server failed!')\n return request_id, result, connect_id\n", "sub_path": "chap.py", "file_name": "chap.py", "file_ext": "py", "file_size_in_byte": 12816, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentTypeError", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio.streams", "line_number": 71, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 75, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 82, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 102, "usage_type": "call"}, {"api_name": "asyncio.open_connection", "line_number": 149, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 154, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 160, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 165, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 166, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 205, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 223, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 225, "usage_type": "call"}, {"api_name": "asyncio.start_server", "line_number": 246, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 260, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 264, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 264, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 265, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 274, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 285, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 307, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 334, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 336, "usage_type": "call"}]} +{"seq_id": "625214828", "text": "import torch\nimport torchvision.models as models\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom tqdm import tqdm\nimport numpy as np\nimport argparse\nimport os\nimport sys\nimport math\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom knn_search_util import search_index_pytorch\nimport faiss\n\n\ndef init_dataset(dataset_root, batch_size):\n '''\n Initialize the datasets, samplers and dataloaders\n '''\n\n train_transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.5243, 0.4289, 0.3736],\n std= [0.1202, 0.1094, 0.1154]\n )\n ])\n\n\n train_dataset = ImageFolder(dataset_root,\n train_transform)\n\n tr_dataloader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size, shuffle=False)\n\n return train_dataset, tr_dataloader\n\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\ndef init_protonet():\n '''\n Initialize the ProtoNet\n '''\n # device = 'cuda:0' if torch.cuda.is_available() and opt.cuda else 'cpu'\n # model = ProtoNet().to(device)\n modules = list(models.resnet34(pretrained=False).children())[:-1]\n modules.append(Flatten())\n model = nn.Sequential(*modules)\n return model\n\nclass FeatModel(nn.Module):\n def __init__(self, model):\n super(FeatModel, self).__init__()\n self.features = nn.Sequential(\n *list(model.children())[:-1]\n )\n 
self.features.add_module('Flatten', Flatten())\n\n def forward(self, x):\n x = self.features(x)\n x = F.normalize(x, p=2, dim=1)\n return x\n\nclass DistillModel(nn.Module):\n def __init__(self, model, num_classes):\n super(DistillModel, self).__init__()\n self.feat = nn.Sequential(\n *list(model.children())[:-1]\n )\n self.classifier = nn.Linear(512, num_classes)\n \n def forward(self, x):\n feature = self.feat(x)\n logits = self.classifier(\n feature.view(feature.size(0), -1))\n return feature, logits\n\ndef main(args):\n\n with torch.no_grad():\n trainset, train_loader = init_dataset(args.train_data_dir, args.batch_size)\n testset, test_loader = init_dataset(args.test_data_dir, args.batch_size)\n\n if args.use_proto:\n model = init_protonet()\n elif args.use_class:\n class_model = models.resnet34(pretrained=False, num_classes=1000)\n class_model.load_state_dict(torch.load(args.model))\n model = FeatModel(class_model )\n elif args.use_distill:\n class_model = models.resnet34(pretrained=False, num_classes=1000)\n model = DistillModel(class_model, 1000)\n model.load_state_dict(torch.load(args.model))\n model = FeatModel(model)\n else:\n class_model = models.resnet34(pretrained=False)\n model = FeatModel(class_model)\n model.load_state_dict(torch.load(args.model))\n\n model.cuda()\n model.eval()\n\n\n feat_novel = torch.zeros((len(trainset), 512))\n label_novel = torch.zeros((len(trainset)))\n\n feat_query = torch.zeros((len(testset), 512))\n label_query = torch.zeros((len(testset)))\n\n print('Runing forward on noval images')\n # tr_iter = iter(train_loader)\n for idx, batch in enumerate(tqdm(train_loader)):\n x, y = batch\n x, y = x.cuda(), y.cuda()\n model_output = model(x)\n start_idx = idx*args.batch_size\n end_idx = min((idx+1)*args.batch_size, len(trainset))\n feat_novel[start_idx: end_idx, :] = model_output\n label_novel[start_idx: end_idx] = y\n\n print('Runing forward on query images')\n for idx, batch in enumerate(tqdm(test_loader)):\n x, y = batch\n x, y = x.cuda(), y.cuda()\n model_output = model(x)\n start_idx = idx*args.batch_size\n end_idx = min((idx+1)*args.batch_size, len(testset))\n feat_query[start_idx: end_idx, :] = model_output\n label_query[start_idx: end_idx] = y\n\n labels0 = label_novel.data.cpu().numpy()\n labels1 = label_query.data.cpu().numpy()\n same = labels0 == labels1[:, np.newaxis]\n r, c = np.where(same)\n\n res = faiss.StandardGpuResources()\n index = faiss.GpuIndexFlatIP(res, 512)\n index.add(feat_novel.data.cpu().numpy())\n\n # top 5 precision\n k5 = 5 # we want to see 5 nearest neighbors\n D5, I5 = search_index_pytorch(index, feat_query, k5)\n prec5 = (np.isin(c.reshape(-1, 1), I5[r])).sum() / c.shape[0]\n\n # top 1 acc\n k1 = 1\n D1, I1 = search_index_pytorch(index, feat_query, k1)\n prec1 = (c.reshape(-1, 1) == I1[r]).sum().item() / c.shape[0]\n\n print(\"top 5 precision {}\".format(prec5))\n print(\"top 1 precision {}\".format(prec1))\n # print(\"recall {}\".format(c.shape[0]/2000))\n\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('train_data_dir', type=str,\n help='Path to the data directory containing aligned LFW face patches.')\n parser.add_argument('test_data_dir', type=str,\n help='Path to the data directory containing aligned LFW face patches.')\n parser.add_argument('model', type=str,\n help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')\n parser.add_argument('--batch_size', type=int,\n help='Number of images to process in a batch.', 
default=90)\n parser.add_argument('--use_proto', action='store_true')\n parser.add_argument('--use_class', action='store_true')\n parser.add_argument('--use_distill', action='store_true')\n\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n", "sub_path": "cluster.py", "file_name": "cluster.py", "file_ext": "py", "file_size_in_byte": 6179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.models.resnet34", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 86, "usage_type": "call"}, {"api_name": "torchvision.models.resnet34", "line_number": 93, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 94, "usage_type": "call"}, {"api_name": "torchvision.models.resnet34", "line_number": 97, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.models.resnet34", "line_number": 102, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 104, "usage_type": "call"}, 
{"api_name": "torch.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 118, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 140, "usage_type": "call"}, {"api_name": "faiss.StandardGpuResources", "line_number": 142, "usage_type": "call"}, {"api_name": "faiss.GpuIndexFlatIP", "line_number": 143, "usage_type": "call"}, {"api_name": "knn_search_util.search_index_pytorch", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 149, "usage_type": "call"}, {"api_name": "knn_search_util.search_index_pytorch", "line_number": 153, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 163, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 180, "usage_type": "attribute"}]} +{"seq_id": "82154897", "text": "'''\nHelper functions for the test code\n'''\n\nimport os\nimport json\nimport pytest\n\nfrom util import sort_count_pairs\n\n# keep lint quiet on test code.\n#pylint: disable-msg=consider-using-ternary\n\nBASE_DIR = os.path.dirname(__file__)\nTEST_DIR = os.path.join(BASE_DIR, \"tests\")\n\ndef read_config_file(filename):\n '''\n Load the test cases from a JSON file.\n\n Inputs:\n filename (string): the name of the test configuration file.\n\n Returns: (list) test cases\n '''\n\n full_path = os.path.join(TEST_DIR, filename)\n try:\n with open(full_path) as f:\n return json.load(f)\n except FileNotFoundError:\n msg = (\"Cannot open file: {}.\\n\"\n \"Did you remember to run the script to get\"\n \" the data and the test files?\")\n pytest.fail(msg.format(full_path))\n\n\ndef gen_none_error(recreate_msg):\n '''\n Generate the error message for an unexpected return value of None.\n\n Inputs:\n recreate_msg (string): a string with the informatino needed to\n rerun the test in ipython.\n\n Returns (string): error message\n '''\n\n msg = \"The function returned None.\"\n msg += \" Did you forget to include a return statement?\\n\"\n return msg + recreate_msg + \"\\n\"\n\n\ndef gen_type_error(recreate_msg, expected, actual):\n '''\n Generate the error message for an return value of the wrong type\n\n Inputs:\n recreate_msg (string): a string with the informatino needed to\n rerun the test in ipython.\n\n Returns (string): error message\n '''\n\n msg = \"The function returned a value of the wrong type.\\n\"\n msg += \" Expected return type: {}.\\n\"\n msg += \" Actual return type: {}.\\n\"\n msg += recreate_msg + \"\\n\"\n return msg.format(type(expected), type(actual))\n\n\ndef gen_mismatch_error(recreate_msg, expected, actual):\n '''\n Generate the error message for the case whether the expected and\n actual values do not match.\n\n Inputs:\n recreate_msg (string): a string with the informatino needed to\n rerun the test in ipython.\n\n Returns (string): error message\n '''\n\n msg = \"\\nActual ({}) and expected ({}) values do not match.\\n\"\n msg += recreate_msg + \"\\n\"\n return msg.format(actual, expected)\n\n\ndef is_sequence(arg):\n '''\n Take this code as a black box, it checks whether an object is a\n list or tuple but not a string.\n\n Ref. 
stackoverflow.com/questions/\n 1835018/python-check-if-an-object-is-a-list-or-tuple-but-not-string\n '''\n return (not hasattr(arg, \"strip\") and\n hasattr(arg, \"__getitem__\") or\n hasattr(arg, \"__iter__\"))\n\n\ndef check_tuple_list(actual, recreate_msg):\n '''\n doc string\n '''\n\n assert actual is not None, gen_none_error(recreate_msg)\n\n msg = \"Expected a sorted list of pairs. Got {}.\\n{}\"\n\n assert is_sequence(actual), \\\n msg.format(type(actual), recreate_msg)\n\n for val in actual:\n msg = (\"Expected a sorted list of pairs.\"\n \" Got list with at least one {}.\\n{}\")\n assert isinstance(val, tuple), \\\n msg.format(type(val), recreate_msg)\n\n\ndef compare_lists(actual, params, recreate_msg):\n '''\n Do a test, check the result, report an error, if necessary.\n '''\n expected = params[\"expected\"]\n\n if actual != expected:\n if len(actual) != len(expected):\n msg = (\"Length of actual result ({}) does not match \"\n \"the length of the expected result ({}).\\n{}\")\n pytest.fail(msg.format(len(actual), len(expected), recreate_msg))\n\n for i, actual_val in enumerate(actual):\n if actual_val != expected[i]:\n msg = (\"At index {}:\"\n \" Actual result ({}) does not match\"\n \" Expected result ({}).\\n{}\")\n pytest.fail(msg.format(i,\n actual_val,\n expected[i],\n recreate_msg))\n # Test succeeded if you get to here\n return\n\n\ndef compare_sets(actual, params, recreate_msg):\n '''\n Do a test, check the result, report an error, if necessary.\n '''\n expected = params[\"expected\"]\n\n assert isinstance(actual, set), \\\n \"Wrong return type. Expected a set. Got {}\".format(type(actual))\n\n if actual != expected:\n if len(actual) != len(expected):\n msg = (\"Length of actual result ({}) does not match \"\n \"the length of the expected result ({}).\\n{}\")\n pytest.fail(msg.format(len(actual), len(expected), recreate_msg))\n\n if actual - expected:\n msg = (\"Actual includes unexpected values: {}\")\n pytest.fail(msg.format(actual - expected))\n\n if expected - actual:\n msg = (\"Actual missing expected values: {}\")\n pytest.fail(msg.format(expected - actual))\n\n # Test succeeded if you get to here\n return\n\n\ndef compare_tuple_lists(actual, params, recreate_msg):\n '''\n Do a test, check the result, report an error, if necessary.\n '''\n\n print(\"Actual:\", actual)\n print()\n print(\"Expected:\", params[\"expected\"])\n\n # check the type\n check_tuple_list(actual, recreate_msg)\n\n expected = params[\"expected\"]\n\n if actual != expected:\n if len(actual) != len(expected):\n msg = (\"Length of actual result ({}) does not match \"\n \"the length of the expected result ({}).\\n{}\")\n pytest.fail(msg.format(len(actual), len(expected), recreate_msg))\n\n if sort_count_pairs(actual) == expected:\n msg = \"Actual result is not sorted properly.\\n{}\"\n pytest.fail(msg.format(recreate_msg))\n\n for i, actual_val in enumerate(actual):\n if actual_val != expected[i]:\n msg = (\"At index {}:\"\n \" Actual result ({}) does not match\"\n \" Expected result ({}).\\n{}\")\n pytest.fail(msg.format(i,\n actual_val,\n expected[i],\n recreate_msg))\n # Test succeeded if you get to here\n return\n\n\ndef compare_list_of_lists(actual, params, recreate_msg):\n '''\n Check the result, report an error, if necessary.\n '''\n\n # check the type\n assert actual is not None, gen_none_error(recreate_msg)\n\n msg = \"Expected list of lists. 
Got {}.\\n{}\"\n assert is_sequence(actual), \\\n msg.format(type(actual), recreate_msg)\n\n expected = params[\"expected\"]\n\n if actual != expected:\n if len(actual) != len(expected):\n msg = (\"Length of actual result ({}) does not match \"\n \"the length of the expected result ({}).\\n{}\")\n pytest.fail(msg.format(len(actual), len(expected), recreate_msg))\n\n for i, actual_val in enumerate(actual):\n msg = (\"Expected list of lists of {}.\"\n \" Got list with at least one {}.\\n{}\")\n assert is_sequence(actual_val), \\\n msg.format(type(actual_val), recreate_msg)\n\n if actual_val != expected[i]:\n msg = (\"At index {}:\"\n \" Actual result ({}) does not match\"\n \" Expected result ({}).\\n{}\")\n pytest.fail(msg.format(i,\n actual_val,\n expected[i],\n recreate_msg))\n\n # Test succeeded if you get to here\n return\n", "sub_path": "test_helpers.py", "file_name": "test_helpers.py", "file_ext": "py", "file_size_in_byte": 7565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 131, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 138, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 159, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 163, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 167, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 191, "usage_type": "call"}, {"api_name": "util.sort_count_pairs", "line_number": 193, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 195, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 202, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 228, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "320247321", "text": "'''\nThis module reads the configuration from command line and makes it available my global variables.\n'''\n\nimport argparse\nimport sys\n# Default Settings\nnb_proto = 50\nnum_classes = 50 # Total number of classes\nnum_classes_itera = [0, 10, 15, 20, 25, 30, 35, 40, 45, 50] # Total number of classes for each iteration\nbatch_size = 256 # Batch size\nnb_batches = 9 # Number of groups\nepochs_first_batch = 5 # Training epochs in first batch\nepochs_other_batches = 5 # Training epochs in other batches\ninitial_lr_first_batch = 0.04 # Initial learning rate for first batch\nlr_strat_first_batch = [] # Epochs where learning rate gets decreased for first batch\ninitial_lr_other_batches = 0.08 # Initial learning rate in batches other than the first\nlr_strat_other_batches = [] # Epochs where learning rate gets decreased in batches other than the first\nlr_factor = 5. 
# Learning rate decrease factor\ngpu = '0' # Used GPU\nwght_decay = 0.0005 # Weight Decay\nmomentum = 0.9 # Momentum for SGD\nimage_size = 128\nnetwork = 'mvggnet'\n\n# Parse command line arguments\nparser = argparse.ArgumentParser(description='iCaRL running on Core50 dataset')\nparser.add_argument('--run', required=True, help='The run to execute')\nparser.add_argument('--lr_1', type=float, help='Learning rate for the first batch')\nparser.add_argument('--ep_1', type=int, help='Learning epochs for the first batch')\nparser.add_argument('--lr_o', type=float, help='Learning rate for the other batches')\nparser.add_argument('--ep_o', type=int, help='Learning epochs for the other batches')\nparser.add_argument('--stored_images', type=int, help='Stored images per class')\nparser.add_argument('--weight_decay', type=float, help='Weight decay')\nparser.add_argument('--image_size', type=int, help='Images size as network input')\nparser.add_argument('--network', help='Network: mvggnet or mcaffenet')\nargs = parser.parse_args()\n\n# Override some settings\nexecution = args.run\nepochs_first_batch = args.ep_1 if args.ep_1 is not None else epochs_first_batch\ninitial_lr_first_batch = args.lr_1 if args.lr_1 is not None else initial_lr_first_batch\nepochs_other_batches = args.ep_o if args.ep_o is not None else epochs_other_batches\ninitial_lr_other_batches = args.lr_o if args.lr_o is not None else initial_lr_other_batches\nnb_proto = args.stored_images if args.stored_images is not None else nb_proto\nwght_decay = args.weight_decay if args.weight_decay is not None else wght_decay\nimage_size = args.image_size if args.image_size is not None else image_size\nnetwork = args.network if args.network is not None else network\n\nif network != 'mvggnet' and network != 'mcaffenet':\n sys.exit(1)", "sub_path": "iCaRL-Tensorflow-27-Core50-VGG/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 2774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "535405457", "text": "# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport logging\nimport re\n\n\nimport botocore.exceptions\nfrom botocore import xform_name\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.emr import configutils\nfrom awscli.customizations.emr import emrutils\nfrom awscli.customizations.emr import exceptions\nfrom awscli.customizations.emr.command import Command\nfrom awscli.customizations.emr.constants import EC2\nfrom awscli.customizations.emr.constants import EC2_ROLE_NAME\nfrom awscli.customizations.emr.constants import EMR\nfrom awscli.customizations.emr.constants import EMR_ROLE_NAME\nfrom awscli.customizations.emr.exceptions import ResolveServicePrincipalError\n\n\nLOG = logging.getLogger(__name__)\n\n\nEC2_ROLE_POLICY = {\n \"Statement\": [\n {\n \"Action\": [\n \"cloudwatch:*\",\n \"dynamodb:*\",\n \"ec2:Describe*\",\n \"elasticmapreduce:Describe*\",\n \"rds:Describe*\",\n \"s3:*\",\n \"sdb:*\",\n \"sns:*\",\n \"sqs:*\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": [\"*\"]\n }\n ]\n}\n\n\nEMR_ROLE_POLICY = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n \"ec2:AuthorizeSecurityGroupIngress\",\n \"ec2:CancelSpotInstanceRequests\",\n \"ec2:CreateSecurityGroup\",\n \"ec2:CreateTags\",\n \"ec2:Describe*\",\n \"ec2:DeleteTags\",\n \"ec2:ModifyImageAttribute\",\n \"ec2:ModifyInstanceAttribute\",\n \"ec2:RequestSpotInstances\",\n \"ec2:RunInstances\",\n \"ec2:TerminateInstances\",\n \"iam:PassRole\",\n \"iam:ListRolePolicies\",\n \"iam:GetRole\",\n \"iam:GetRolePolicy\",\n \"iam:ListInstanceProfiles\",\n \"s3:Get*\",\n \"s3:List*\",\n \"s3:CreateBucket\",\n \"sdb:BatchPutAttributes\",\n \"sdb:Select\"\n ],\n \"Effect\": \"Allow\",\n \"Resource\": \"*\"\n }\n ]\n}\n\n\ndef assume_role_policy(serviceprincipal):\n return {\n \"Version\": \"2008-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": serviceprincipal},\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n\n\ndef get_service_principal(service, endpoint_host):\n return service + '.' + _get_suffix(endpoint_host)\n\n\ndef _get_suffix(endpoint_host):\n return _get_suffix_from_endpoint_host(endpoint_host)\n\n\ndef _get_suffix_from_endpoint_host(endpoint_host):\n suffix_match = _get_regex_match_from_endpoint_host(endpoint_host)\n\n if suffix_match is not None and suffix_match.lastindex >= 3:\n suffix = suffix_match.group(3)\n else:\n raise ResolveServicePrincipalError\n\n return suffix\n\n\ndef _get_regex_match_from_endpoint_host(endpoint_host):\n if endpoint_host is None:\n return None\n regex_match = re.match(\"(https?://)([^.]+).elasticmapreduce.([^/]*)\",\n endpoint_host)\n\n # Supports 'elasticmapreduce.{region}.' and '{region}.elasticmapreduce.'\n if regex_match is None:\n regex_match = re.match(\"(https?://elasticmapreduce).([^.]+).([^/]*)\",\n endpoint_host)\n return regex_match\n\n\nclass CreateDefaultRoles(Command):\n NAME = \"create-default-roles\"\n DESCRIPTION = ('Creates the default IAM role ' +\n EC2_ROLE_NAME + ' and ' +\n EMR_ROLE_NAME + ' which can be used when'\n ' creating the cluster using the create-cluster command.\\n'\n '\\nIf you do not have a Service Role and Instance Profile '\n 'variable set for your create-cluster command in the AWS '\n 'CLI config file, create-default-roles will automatically '\n 'set the values for these variables with these default '\n 'roles. 
If you have already set a value for Service Role '\n 'or Instance Profile, create-default-roles will not '\n 'automatically set the defaults for these variables in the '\n 'AWS CLI config file. You can view settings for variables '\n 'in the config file using the \"aws configure get\" command.'\n '\\n')\n ARG_TABLE = [\n {'name': 'iam-endpoint',\n 'no_paramfile': True,\n 'help_text': '
<p>
The IAM endpoint to call for creating the roles.'\n ' This is optional and should only be specified when a'\n ' custom endpoint should be called for IAM operations'\n '.
</p>
'}\n ]\n\n def _run_main_command(self, parsed_args, parsed_globals):\n ec2_result = None\n emr_result = None\n self.iam_endpoint_url = parsed_args.iam_endpoint\n\n self._check_for_iam_endpoint(self.region, self.iam_endpoint_url)\n self.emr_endpoint_url = \\\n self._session.create_client(\n 'emr',\n region_name=self.region,\n endpoint_url=parsed_globals.endpoint_url,\n verify=parsed_globals.verify_ssl).meta.endpoint_url\n\n LOG.debug('elasticmapreduce endpoint used for resolving'\n ' service principal: ' + self.emr_endpoint_url)\n\n # Check if the default EC2 Role for EMR exists.\n role_name = EC2_ROLE_NAME\n if self._check_if_role_exists(role_name, parsed_globals):\n LOG.debug('Role ' + role_name + ' exists.')\n else:\n LOG.debug('Role ' + role_name + ' does not exist.'\n ' Creating default role for EC2: ' + role_name)\n ec2_result = self._create_role_with_role_policy(\n role_name, role_name, EC2,\n emrutils.dict_to_string(EC2_ROLE_POLICY),\n parsed_globals)\n\n # Check if the default EC2 Instance Profile for EMR exists.\n instance_profile_name = EC2_ROLE_NAME\n if self._check_if_instance_profile_exists(instance_profile_name,\n parsed_globals):\n LOG.debug('Instance Profile ' + instance_profile_name + ' exists.')\n else:\n LOG.debug('Instance Profile ' + instance_profile_name +\n 'does not exist. Creating default Instance Profile ' +\n instance_profile_name)\n self._create_instance_profile_with_role(instance_profile_name,\n instance_profile_name,\n parsed_globals)\n\n # Check if the default EMR Role exists.\n role_name = EMR_ROLE_NAME\n if self._check_if_role_exists(role_name, parsed_globals):\n LOG.debug('Role ' + role_name + ' exists.')\n else:\n LOG.debug('Role ' + role_name + ' does not exist.'\n ' Creating default role for EMR: ' + role_name)\n emr_result = self._create_role_with_role_policy(\n role_name, role_name, EMR,\n emrutils.dict_to_string(EMR_ROLE_POLICY),\n parsed_globals)\n\n configutils.update_roles(self._session)\n\n emrutils.display_response(\n self._session,\n 'create_role',\n self._construct_result(ec2_result, emr_result),\n parsed_globals)\n\n return 0\n\n def _check_for_iam_endpoint(self, region, iam_endpoint):\n try:\n self._session.create_client('emr', region)\n except botocore.exceptions.UnknownEndpointError:\n if iam_endpoint is None:\n raise exceptions.UnknownIamEndpointError(region=region)\n\n def _construct_result(self, ec2_response, emr_response):\n result = []\n self._construct_role_and_role_policy_structure(\n result, ec2_response, EC2_ROLE_POLICY)\n self._construct_role_and_role_policy_structure(\n result, emr_response, EMR_ROLE_POLICY)\n return result\n\n def _construct_role_and_role_policy_structure(\n self, list, response, role_policy):\n if response is not None and response[1] is not None:\n list.append({'Role': response[1]['Role'],\n 'RolePolicy': role_policy})\n return list\n\n def _check_if_role_exists(self, role_name, parsed_globals):\n parameters = {'RoleName': role_name}\n try:\n self._call_iam_operation('GetRole', parameters, parsed_globals)\n except Exception as e:\n role_not_found_msg = 'The role with name ' + role_name +\\\n ' cannot be found'\n if role_not_found_msg in e.message:\n # No role error.\n return False\n else:\n # Some other error. 
raise.\n raise e\n\n return True\n\n def _check_if_instance_profile_exists(self, instance_profile_name,\n parsed_globals):\n parameters = {'InstanceProfileName': instance_profile_name}\n try:\n self._call_iam_operation('GetInstanceProfile', parameters,\n parsed_globals)\n except Exception as e:\n profile_not_found_msg = 'Instance Profile ' +\\\n instance_profile_name +\\\n ' cannot be found.'\n if profile_not_found_msg in e.message:\n # No instance profile error.\n return False\n else:\n # Some other error. raise.\n raise e\n\n return True\n\n def _create_role_with_role_policy(\n self, role_name, policy_name, service_name, policy_document,\n parsed_globals):\n service_principal = get_service_principal(service_name,\n self.emr_endpoint_url)\n LOG.debug(service_principal)\n\n parameters = {'RoleName': role_name}\n _assume_role_policy = \\\n emrutils.dict_to_string(assume_role_policy(service_principal))\n parameters['AssumeRolePolicyDocument'] = _assume_role_policy\n create_role_response = self._call_iam_operation('CreateRole',\n parameters,\n parsed_globals)\n\n parameters = {}\n parameters['PolicyDocument'] = policy_document\n parameters['PolicyName'] = policy_name\n parameters['RoleName'] = role_name\n self._call_iam_operation('PutRolePolicy', parameters, parsed_globals)\n\n return create_role_response\n\n def _create_instance_profile_with_role(self, instance_profile_name,\n role_name, parsed_globals):\n # Creating an Instance Profile\n parameters = {'InstanceProfileName': instance_profile_name}\n self._call_iam_operation('CreateInstanceProfile', parameters,\n parsed_globals)\n # Adding the role to the Instance Profile\n parameters = {}\n parameters['InstanceProfileName'] = instance_profile_name\n parameters['RoleName'] = role_name\n self._call_iam_operation('AddRoleToInstanceProfile', parameters,\n parsed_globals)\n\n def _call_iam_operation(self, operation_name, parameters, parsed_globals):\n client = self._session.create_client(\n 'iam', self.region, self.iam_endpoint_url,\n parsed_globals.verify_ssl)\n return getattr(client, xform_name(operation_name))(**parameters)\n", "sub_path": "awscli/customizations/emr/createdefaultroles.py", "file_name": "createdefaultroles.py", "file_ext": "py", "file_size_in_byte": 12369, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.exceptions.ResolveServicePrincipalError", "line_number": 119, "usage_type": "name"}, {"api_name": "re.match", "line_number": 127, "usage_type": "call"}, {"api_name": "re.match", "line_number": 132, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.command.Command", "line_number": 137, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EC2_ROLE_NAME", "line_number": 140, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EMR_ROLE_NAME", "line_number": 141, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EC2_ROLE_NAME", "line_number": 179, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EC2", "line_number": 186, "usage_type": "argument"}, {"api_name": "awscli.customizations.emr.emrutils.dict_to_string", "line_number": 187, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.emrutils", "line_number": 187, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EC2_ROLE_NAME", "line_number": 191, "usage_type": "name"}, 
{"api_name": "awscli.customizations.emr.constants.EMR_ROLE_NAME", "line_number": 204, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.constants.EMR", "line_number": 211, "usage_type": "argument"}, {"api_name": "awscli.customizations.emr.emrutils.dict_to_string", "line_number": 212, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.emrutils", "line_number": 212, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.configutils.update_roles", "line_number": 215, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.configutils", "line_number": 215, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.emrutils.display_response", "line_number": 217, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.emrutils", "line_number": 217, "usage_type": "name"}, {"api_name": "botocore.exceptions.exceptions", "line_number": 228, "usage_type": "attribute"}, {"api_name": "botocore.exceptions", "line_number": 228, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.exceptions.UnknownIamEndpointError", "line_number": 230, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.exceptions", "line_number": 230, "usage_type": "name"}, {"api_name": "awscli.customizations.emr.emrutils.dict_to_string", "line_number": 291, "usage_type": "call"}, {"api_name": "awscli.customizations.emr.emrutils", "line_number": 291, "usage_type": "name"}, {"api_name": "botocore.xform_name", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "588649322", "text": "import torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataloader import default_collate\nimport numpy as np\nimport time\nfrom model_training.utils import loadmat, CustomTensorDataset, load_weights, load_labels, resample, slide_and_cut, load_challenge_data\nfrom model_training.util import my_find_challenge_files\nimport os\nfrom utils.denoising import filter_and_detrend\n\n# Challenge Dataloaders and Challenge metircs\n\nclass BaseDataLoader(DataLoader):\n \"\"\"\n Base class for all data loaders\n \"\"\"\n\n def __init__(self, train_dataset, val_dataset, test_dataset, batch_size, shuffle, num_workers,\n collate_fn=default_collate, pin_memory=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.test_dataset = test_dataset\n self.batch_size = batch_size\n self.batch_idx = 0\n self.shuffle = shuffle\n\n self.init_kwargs = {\n 'dataset': self.train_dataset,\n 'batch_size': batch_size,\n 'shuffle': self.shuffle,\n 'collate_fn': collate_fn,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory,\n 'drop_last': True\n }\n super().__init__(**self.init_kwargs)\n\n self.n_samples = len(self.train_dataset)\n\n self.valid_data_loader_init_kwargs = {\n 'dataset': self.val_dataset,\n 'batch_size': batch_size,\n 'shuffle': self.shuffle,\n 'collate_fn': collate_fn,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory,\n 'drop_last': True\n }\n\n self.valid_data_loader = DataLoader(**self.valid_data_loader_init_kwargs)\n\n self.valid_data_loader.n_samples = len(self.val_dataset)\n\n if self.test_dataset:\n\n self.test_data_loader_init_kwargs = {\n 'dataset': self.test_dataset,\n 'batch_size': batch_size,\n 'shuffle': False,\n 'collate_fn': collate_fn,\n 'num_workers': num_workers,\n 'pin_memory': pin_memory,\n 'drop_last': True\n }\n\n self.test_data_loader = DataLoader(**self.test_data_loader_init_kwargs)\n\n self.test_data_loader.n_samples = len(self.test_dataset)\n\nclass ChallengeDataset():\n \"\"\"\n challenge2020 data loading\n \"\"\"\n 
def __init__(self, label_dir, split_index, batch_size=128, shuffle=True, num_workers=0, resample_Fs=500, window_size=5000, n_segment=1,\n normalization=False, training_size=None, augmentations=None, p=0.5, lead_number=12, save_data=False, load_saved_data=False):\n self.label_dir = label_dir\n self.dir2save_data = '/data/ecg/challenge2021/data/'\n dir2save_data = '/data/ecg/challenge2021/data/'\n start = time.time()\n\n # Define the weights, the SNOMED CT code for the normal class, and equivalent SNOMED CT codes.\n weights_file = 'weights.csv'\n\n # Load the scored classes and the weights for the Challenge metric.\n print('Loading weights...')\n _, weights, indices = load_weights(weights_file)\n classes = \"164889003,164890007,6374002,426627000,733534002,713427006,270492004,713426002,39732003,445118002,164947007,251146004,111975006,698252002,426783006,63593006,10370003,365413008,427172004,164917005,47665007,427393009,426177001,427084000,164934002,59931005\"\n ### equivalent SNOMED CT codes merged, noted as the larger one\n classes = classes.split(',')\n self.weights = weights\n\n # Load the label and output files.\n print('Loading label and output files...')\n label_files = my_find_challenge_files(label_dir)\n labels_onehot = load_labels(label_files, classes)\n\n split_idx = loadmat(split_index)\n train_index, val_index = split_idx['train_index'], split_idx['val_index']\n train_index = train_index.reshape((train_index.shape[1],))\n if training_size is not None: # for test\n train_index = train_index[0:training_size]\n val_index = val_index.reshape((val_index.shape[1],))\n # test_index = test_index.reshape((test_index.shape[1],))\n\n num_files = len(label_files)\n train_number = 0\n val_number = 0\n for i in range(num_files):\n name = label_files[i].split('/')[-1].split('.')[0]\n if i in train_index or name[0] == 'A' or name[0] == 'Q':\n train_number += 1\n elif i in val_index:\n val_number += 1\n print(\"train number: {}, val number: {}\".format(train_number, val_number))\n\n train_recordings = np.zeros((train_number, 12, 5000), dtype=float)\n train_class_weights = np.zeros((train_number, 26,), dtype=float)\n train_labels_onehot = np.zeros((train_number, 26,), dtype=float)\n\n val_recordings = np.zeros((val_number, 12, 5000), dtype=float)\n val_class_weights = np.zeros((val_number, 26,), dtype=float)\n val_labels_onehot = np.zeros((val_number, 26,), dtype=float)\n\n # file_names = list()\n\n ### class weights for datasets\n # equivalent diagnose [['713427006', '59118001'], ['63593006', '284470004'], ['427172004', '17338001'], ['733534002', '164909002']]\n # CPSC\n CPSC_classes = ['270492004', '164889003', '733534002', '63593006', '426783006',\n '713427006'] # \"59118001\" = \"713427006\"\n CPSC_class_weight = np.zeros((26,))\n for cla in CPSC_classes:\n CPSC_class_weight[classes.index(cla)] = 1\n # CPSC_extra\n CPSC_extra_excluded_classes = ['6374002', '39732003', '445118002', '251146004', '365413008',\n '164947007', '365413008', '164947007', '698252002', '426783006',\n '10370003', '111975006', '164917005', '47665007', '427393009',\n '426177001', '164934002', '59931005']\n CPSC_extra_class_weight = np.ones((26,))\n for cla in CPSC_extra_excluded_classes:\n CPSC_extra_class_weight[classes.index(cla)] = 0\n # PTB-XL\n PTB_XL_excluded_classes = ['6374002', '426627000', '365413008', '427172004'] # , '17338001'\n PTB_XL_class_weight = np.ones((26,))\n for cla in PTB_XL_excluded_classes:\n PTB_XL_class_weight[classes.index(cla)] = 0\n # G12ECG\n G12ECG_excluded_classes = ['10370003', '365413008', 
'164947007']\n G12ECG_class_weight = np.ones((26,))\n for cla in G12ECG_excluded_classes:\n G12ECG_class_weight[classes.index(cla)] = 0\n # Chapman Shaoxing\n Chapman_excluded_classes = ['6374002', '426627000', '713426002', '445118002', '10370003', '365413008',\n '427172004', '427393009', '427084000', '63593006']\n Chapman_class_weight = np.ones((26,))\n for cla in Chapman_excluded_classes:\n Chapman_class_weight[classes.index(cla)] = 0\n # Ningbo\n Ningbo_excluded_classes = ['164889003', '164890007', '426627000']\n Ningbo_class_weight = np.ones((26,))\n for cla in Ningbo_excluded_classes:\n Ningbo_class_weight[classes.index(cla)] = 0\n\n if load_saved_data == False:\n ### preprocess data and label\n train_num = 0\n val_num = 0\n for i in range(num_files):\n print('{}/{}'.format(i + 1, num_files))\n recording, header, name = load_challenge_data(label_files[i], label_dir)\n\n if name[0] == 'S' or name[0] == 'I': # filter PTB or St.P dataset\n continue\n elif name[0] == 'A': # CPSC\n class_weight = CPSC_class_weight\n elif name[0] == 'Q': # CPSC-extra\n class_weight = CPSC_extra_class_weight\n elif name[0] == 'H': # PTB-XL\n class_weight = PTB_XL_class_weight\n elif name[0] == 'E': # G12ECG\n class_weight = G12ECG_class_weight\n elif name[0] == 'J' and int(name[2:]) <= 10646: # Chapman\n class_weight = Chapman_class_weight\n elif name[0] == 'J' and int(name[2:]) > 10646: # Ningbo\n class_weight = Ningbo_class_weight\n else:\n print('warning! not from one of the datasets: ', name)\n continue\n\n recording[np.isnan(recording)] = 0\n\n # divide ADC_gain and resample\n recording = resample(recording, header, resample_Fs)\n\n # to filter and detrend samples\n recording = filter_and_detrend(recording)\n\n # slide and cut\n recording = slide_and_cut(recording, n_segment, window_size, resample_Fs)\n # file_names.append(name)\n if i in train_index or name[0] == 'A' or name[0] == 'Q':\n for j in range(recording.shape[0]): # segment number = 1 -> j=0\n train_recordings[train_num] = recording[j]\n train_labels_onehot[train_num] = labels_onehot[i]\n train_class_weights[train_num] = class_weight\n train_num += 1\n elif i in val_index:\n for j in range(recording.shape[0]):\n val_recordings[val_num] = recording[j]\n val_labels_onehot[val_num] = labels_onehot[i]\n val_class_weights[val_num] = class_weight\n val_num += 1\n else:\n pass\n\n # train_recordings = np.array(train_recordings)\n # train_class_weights = np.array(train_class_weights)\n # train_labels_onehot = np.array(train_labels_onehot)\n #\n # val_recordings = np.array(val_recordings)\n # val_class_weights = np.array(val_class_weights)\n # val_labels_onehot = np.array(val_labels_onehot)\n\n else:\n train_recordings = np.load(os.path.join(dir2save_data, 'train_recordings_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'))\n train_class_weights = np.load(os.path.join(dir2save_data, 'train_class_weights_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'))\n train_labels_onehot = np.load(os.path.join(dir2save_data, 'train_labels_onehot_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'))\n val_recordings = np.load(os.path.join(dir2save_data, 'val_recordings_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), )\n val_class_weights = np.load(os.path.join(dir2save_data, 'val_class_weights_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), )\n 
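            # The cached .npy files are keyed by window size and sampling rate,
            # e.g. 'val_labels_onehot_windowSize5000_samplingRate500.npy' for the
            # defaults window_size=5000 and resample_Fs=500.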
val_labels_onehot = np.load(os.path.join(dir2save_data, 'val_labels_onehot_' + 'windowSize' + str(\n window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), )\n print('data loaded!')\n\n if save_data:\n if not os.path.exists(dir2save_data):\n os.mkdir(dir2save_data)\n np.save(os.path.join(dir2save_data,\n 'train_recordings_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), train_recordings)\n np.save(os.path.join(dir2save_data,\n 'train_class_weights_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), train_class_weights)\n np.save(os.path.join(dir2save_data,\n 'train_labels_onehot_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), train_labels_onehot)\n np.save(os.path.join(dir2save_data,\n 'val_recordings_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), val_recordings)\n np.save(os.path.join(dir2save_data,\n 'val_class_weights_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), val_class_weights)\n np.save(os.path.join(dir2save_data,\n 'val_labels_onehot_' + 'windowSize' + str(window_size) + '_' + 'samplingRate' + str(\n resample_Fs) + '.npy'), val_labels_onehot)\n print('data saved!!!')\n\n # # Normalization\n # if normalization:\n # train_recordings = self.normalization(train_recordings)\n # val_recordings = self.normalization(val_recordings)\n\n ### To check nan value\n #\n # nan_files_train_num = 0\n # nan_files_val_num = 0\n # for i in range(len(train_recordings)):\n # if np.isnan(train_recordings[i]).any() == True:\n # # tmp = train_recordings[i]\n # # tmp2 = original_recordings[i]\n # print('train_recordings', i)\n # nan_files_train_num += 1\n # for i in range(len(val_recordings)):\n # if np.isnan(val_recordings[i]).any() == True:\n # print('val_recordings', i)\n # nan_files_val_num += 1\n # print(\"files number with nan value: \", nan_files_train_num, nan_files_val_num)\n # assert np.isnan(train_recordings).any() == False\n # assert np.isnan(val_recordings).any() == False\n # train_recordings[np.isnan(train_recordings)] = 0\n # val_recordings[np.isnan(val_recordings)] = 0\n # assert np.isnan(train_class_weights).any() == False\n # assert np.isnan(val_class_weights).any() == False\n\n train_recordings = torch.from_numpy(train_recordings)\n train_class_weights = torch.from_numpy(train_class_weights)\n train_labels_onehot = torch.from_numpy(train_labels_onehot)\n\n val_recordings = torch.from_numpy(val_recordings)\n val_class_weights = torch.from_numpy(val_class_weights)\n val_labels_onehot = torch.from_numpy(val_labels_onehot)\n\n # have moved this code into Dataset\n # ### 12 leads order: I II III aVL aVR aVF V1 V2 V3 V4 V5 V6\n # if lead_number == 2: # two leads: I II\n # leads_index = [0, 1]\n # elif lead_number == 3: # three leads: I II V2\n # leads_index = [0, 1, 7]\n # elif lead_number == 4: # four leads: I II III V2\n # leads_index = [0, 1, 2, 7]\n # elif lead_number == 6: # six leads: I II III aVL aVR aVF\n # leads_index = [0, 1, 2, 3, 4, 5]\n # elif lead_number == 8: # eight leads\n # leads_index = [0, 1, 6, 7, 8, 9, 10, 11]\n # else: # twelve leads\n # leads_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n #\n # X_train = X_train[:, leads_index, :]\n # X_val = X_val[:, leads_index, :]\n\n self.train_dataset = CustomTensorDataset(train_recordings, train_labels_onehot, train_class_weights)\n self.val_dataset = CustomTensorDataset(val_recordings, 
val_labels_onehot, val_class_weights)\n\n end = time.time()\n print('time to get and process data: {}'.format(end - start))\n # super().__init__(self.train_dataset, self.val_dataset, None, batch_size, shuffle, num_workers)\n\n # self.valid_data_loader.file_names = file_names\n # self.valid_data_loader.idx = val_index\n\nclass ChallengeDataLoader(BaseDataLoader):\n \"\"\"\n challenge2020 data loading\n \"\"\"\n def __init__(self, train_dataset, val_dataset, batch_size=128, shuffle=True, num_workers=0):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n\n super().__init__(self.train_dataset, self.val_dataset, None, batch_size, shuffle, num_workers)\n\n # self.valid_data_loader.file_names = file_names\n # self.valid_data_loader.idx = val_index\n", "sub_path": "model_training/training.py", "file_name": "training.py", "file_ext": "py", "file_size_in_byte": 16441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "torch.utils.data.DataLoader", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.utils.data.dataloader.default_collate", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 79, "usage_type": "call"}, {"api_name": "model_training.utils.load_weights", "line_number": 86, "usage_type": "call"}, {"api_name": "model_training.util.my_find_challenge_files", "line_number": 94, "usage_type": "call"}, {"api_name": "model_training.utils.load_labels", "line_number": 95, "usage_type": "call"}, {"api_name": "model_training.utils.loadmat", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 160, "usage_type": "call"}, {"api_name": "model_training.utils.load_challenge_data", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 190, "usage_type": "call"}, {"api_name": "model_training.utils.resample", "line_number": 193, "usage_type": "call"}, {"api_name": "utils.denoising.filter_and_detrend", "line_number": 196, "usage_type": "call"}, {"api_name": "model_training.utils.slide_and_cut", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path", "line_number": 225, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.load", 
"line_number": 231, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 301, "usage_type": "call"}, {"api_name": "model_training.utils.CustomTensorDataset", "line_number": 321, "usage_type": "call"}, {"api_name": "model_training.utils.CustomTensorDataset", "line_number": 322, "usage_type": "call"}, {"api_name": "time.time", "line_number": 324, "usage_type": "call"}]} +{"seq_id": "390182483", "text": "#!/usr/bin/env python\n\n\"\"\"EDA.py: Exploratory Data Analysis of sample dataset\"\"\"\n\n__author__ = \"Tim Verlaan 11669128\"\n\nimport csv\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \nfrom scipy import stats\nimport seaborn as sns\nimport pprint as pp\nimport json\n\ndef set_up():\n \"\"\"Parsing & Preprocessing\"\"\"\n \n csv_file = \"input.csv\"\n \n # strip the white spaces from the region column\n with 
open(csv_file) as f:\n reader = csv.reader(f, delimiter=\",\")\n with open(\"stripped.csv\", \"w\") as fo:\n writer = csv.writer(fo)\n for row in reader:\n writer.writerow([e.strip() for e in row])\n\n # list with stand-ins for empty cells\n missing_values = [\"n/a\", \"na\", \"unknown\", \"-\", \"\"]\n\n # set missing values to NaN\n df = pd.read_csv(\"stripped.csv\", na_values = missing_values, skipinitialspace = True)\n\n # drop columns we won't be using\n df = df.drop(columns=['Population', 'Area (sq. mi.)', 'Coastline (coast/area ratio)', 'Net migration', \n 'Literacy (%)', 'Phones (per 1000)', 'Arable (%)', 'Crops (%)', 'Other (%)', \n 'Climate', 'Birthrate', 'Deathrate', 'Agriculture', 'Industry', 'Service'])\n\n\n # strip the string 'dollar' and set remaining value to floats\n df['GDP ($ per capita) dollars'] = df['GDP ($ per capita) dollars'].str.lstrip('+-').str.rstrip('dollars').astype(float)\n\n # make commas into dots\n df['Infant mortality (per 1000 births)'] = df['Infant mortality (per 1000 births)'].str.replace(',', '.')\n df['Pop. Density (per sq. mi.)'] = df['Pop. Density (per sq. mi.)'].str.replace(',', '.')\n\n # make strings into numerics\n df['Infant mortality (per 1000 births)'] = pd.to_numeric(df['Infant mortality (per 1000 births)'], errors='coerce')\n df['Pop. Density (per sq. mi.)'] = pd.to_numeric(df['Pop. Density (per sq. mi.)'], errors='coerce')\n\n # unify the region column\n df.loc[df['Region'].str.contains('NEAR EAST'), 'Region'] = 'ASIA (EX. NEAR EAST)'\n\n # remove the outliners - considered outliner when bigger than 3 std.dev to the right of the distribution\n df = df[df['GDP ($ per capita) dollars'] < (3*df['GDP ($ per capita) dollars'].std())]\n\n print(df)\n\n return(df)\n\n\ndef central_tendency(df):\n \"\"\"Analyzing & Presenting GDP\"\"\"\n\n # make boxplots to see if there are outliners\n # sns.boxplot(x=df['GDP ($ per capita) dollars'])\n\n # calc descriptives. 
Note: mode[0] makes sure it only prints the value, not the zero and the dtype\n std_dev = df['GDP ($ per capita) dollars'].std()\n mode = df['GDP ($ per capita) dollars'].mode()[0]\n mean = df['GDP ($ per capita) dollars'].mean()\n median = df['GDP ($ per capita) dollars'].median()\n\n print(f'\\n GDP standard deviation = {std_dev} \\n GDP mode = {mode} \\n GDP mean = {mean} \\n GDP median = {median} \\n')\n\n # Sturge's Rule for amount of Bins in histogram source: https://www.statisticshowto.datasciencecentral.com/choose-bin-sizes-statistics/\n K = round(1 + math.log(226,2))\n\n # plot the GDP graph with correct graph info\n df['GDP ($ per capita) dollars'].hist(bins=K)\n plt.ylabel('Number of Countries')\n plt.xlabel('GDP ($ per capita)')\n plt.suptitle('GDP ($ per capita) around the Globe')\n plt.show()\n\ndef five_num_sum (df):\n \"\"\"Analyzing & Presenting Infancy\"\"\"\n\n # check if the descriptives match\n # print(df.describe())\n\n # make boxplots to see if there are outliners and swarm plot to see distribution more clearly\n sns.boxplot(x=df['Infant mortality (per 1000 births)'])\n sns.swarmplot(x=df['Infant mortality (per 1000 births)'], color = \".01\")\n \n # calc the min, first quant, median, third quant, max\n infant_minimum = df['Infant mortality (per 1000 births)'].min()\n infant_25 = df['Infant mortality (per 1000 births)'].quantile(0.25)\n infant_median = df['Infant mortality (per 1000 births)'].median()\n infant_75 = df['Infant mortality (per 1000 births)'].quantile(0.75)\n infant_maximum = df['Infant mortality (per 1000 births)'].max()\n\n #print the boxplot and swarmplot\n plt.show()\n \n # print the descriptives\n print(f' Infant minimum = {infant_minimum}\\n Infant first quantile = {infant_25}\\n Infant median = {infant_median}\\n Infant third quantile = {infant_75}\\n Infant maximum = {infant_maximum} \\n ')\n\ndef converting(df):\n \"\"\"Presenting in a JSON file\"\"\"\n\n # create dict with country-column as index\n df_dict = df.set_index('Country').T.to_dict('dict')\n\n # make json file from the dict\n with open('result.json', 'w') as fp:\n json.dump(df_dict, fp)\n\n # use pretty print to see if dict matches the json example in the exercise\n # pp.pprint(df_dict)\n\n\nif __name__ == \"__main__\":\n \"\"\"Separating the function and calling them orderly\"\"\"\n \n df = set_up()\n \n central_tendency(df)\n\n five_num_sum(df)\n\n converting(df)\n\n\n", "sub_path": "Homework/Week_2/EDA.py", "file_name": "EDA.py", "file_ext": "py", "file_size_in_byte": 4951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "csv.reader", "line_number": 24, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 51, "usage_type": "call"}, {"api_name": "math.log", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "seaborn.boxplot", "line_number": 95, "usage_type": "call"}, {"api_name": "seaborn.swarmplot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "142570091", "text": "import praw\nimport requests\nimport urllib.request\nfrom threading import Thread\nimport time\nimport pandas as pd\nimport winsound\n\n\nreddit = praw.Reddit(client_id = '####################',\n client_secret = '####################',\n user_agent = '####################',\n username = '####################',\n password = '####################')\n\n''' Change the data using pandas '''\n\n\n\nclass operations:\n sub_file = open(\"submission_list.txt\",'a',1)\n com_file = open(\"comment_list.txt\",'a',1)\n stop_sub_thread = False\n stop_com_thread = False\n def old_comment(self,url):\n #self.sub_url = url\n submission = reddit.submission(url=url)\n comments = submission.comments.list()\n for comment in comments:\n self.com_file.write(f\"\\nOLD COMMENT = {comment.body}\")\n\n def stream_submission_comments(self,subreddit):\n print(\"thread 2 started\")\n for comment in subreddit.stream.comments():#skip_existing = True):\n with open(\"submission_list.txt\",'r') as f:\n sub_ids = f.readlines()\n com_id = comment.link_id+\"\\n\"\n if com_id in sub_ids: #add all comments containing the keywords (or keyword in com.body:)\n print(\"----- COM ADDED -----\")\n self.com_file.write(\"---------- NEW COM ----------\")\n self.com_file.write(f\"\\nID = {comment.link_id}\")\n self.com_file.write(f\"\\nCOMMENT = {comment.body}\")\n if self.stop_com_thread == True:\n break\n\n def stream_submissions(self,subreddit,find):\n print(\"thread 1 started\")\n find = find.upper()\n i = 0\n for submission in subreddit.stream.submissions():\n title = submission.title.upper()\n sub_text = submission.selftext.upper()\n if find in title or find in sub_text:\n i+=1\n print(f\"\\n---------- NEW POST {i}----------\")\n print(f\"ID = https://www.reddit.com/{submission.permalink}\")\n print(f\"title = {submission.title}\")\n self.sub_file.write(f\"{submission.name}\\n\")\n beep()\n if self.stop_sub_thread == True:\n break\n\ndef beep():\n frequency = 2500 # Set Frequency To 2500 Hertz\n duration = 150 # Set Duration To 1000 ms == 1 second\n winsound.Beep(frequency, duration)\n\nif __name__ == \"__main__\":\n subreddit = reddit.subreddit('all')\n search_for = \"book\"\n red_obj = operations()\n T1 = Thread(target=red_obj.stream_submissions,args=(subreddit,search_for,))\n T2 = Thread(target=red_obj.stream_submission_comments,args=(subreddit,))\n try:\n T1.start()\n T2.start()\n while True: time.sleep(100)\n except (KeyboardInterrupt, SystemExit):\n print(\"\\n\\t------- KEYBOARD INTERRUPT SUCCESSFUL -------\")\n red_obj.stop_sub_thread = True\n red_obj.stop_com_thread = True\n\n\"\"\"\n Create: Flask API for POST: To create data of the reddit\n Read: Flask API for GET (List/Individual): Fetch data from React\n\"\"\"", "sub_path": "reddit.py", "file_name": "reddit.py", "file_ext": "py", "file_size_in_byte": 3172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "praw.Reddit", "line_number": 10, 
"usage_type": "call"}, {"api_name": "winsound.Beep", "line_number": 66, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 72, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "14017942", "text": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport errno\nimport datetime\nimport json\nimport os\nimport gzip\nimport shutil\nimport stat\n\nfrom contextlib import contextmanager, closing\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nfrom configman import Namespace\nfrom socorro.external.crashstorage_base import CrashStorageBase, \\\n CrashIDNotFound\nfrom socorro.lib.ooid import dateFromOoid, depthFromOoid\nfrom socorro.lib.datetimeutil import utc_now\nfrom socorro.lib.util import DotDict\n\n@contextmanager\ndef using_umask(n):\n old_n = os.umask(n)\n yield\n os.umask(old_n)\n\n\nclass FSRadixTreeStorage(CrashStorageBase):\n \"\"\"\n This class implements basic radix tree storage. It stores crashes using the\n crash_id radix scheme under ``fs_root``.\n\n Files are stored in the following scheme::\n\n root/yyyymmdd/name_branch_base/radix.../crash_id/\n\n The date is determined using the date suffix of the crash_id, and the\n name_branch_base is given in the configuration options. The radix is\n computed from the crash_id by substringing the UUID in octets to the depth\n given in the crash_id, for instance:\n\n 0bba929f-8721-460c-dead-a43c20071025 is stored in::\n\n root/20071025/name/0b/ba/92/9f/0bba929f-8721-460c-dead-a43c20071025\n\n This storage does not implement ``new_crashes``, but is able to store\n processed crashes. 
Used alone, it is intended to store only processed\n crashes.\n \"\"\"\n\n required_config = Namespace()\n required_config.add_option(\n 'fs_root',\n doc='a path to a file system',\n default='./crashes',\n\n # We strip / from the right so we can consistently use os.sep.join\n # instead of os.path.join (which is faster).\n from_string_converter=lambda x: x.rstrip('/')\n )\n required_config.add_option(\n 'umask',\n doc='umask to use for new files',\n default=0o022\n )\n required_config.add_option(\n 'json_file_suffix',\n doc='the suffix used to identify a json file',\n default='.json'\n )\n required_config.add_option(\n 'jsonz_file_suffix',\n doc='the suffix used to identify a gzipped json file',\n default='.jsonz'\n )\n required_config.add_option(\n 'dump_file_suffix',\n doc='the suffix used to identify a dump file',\n default='.dump'\n )\n required_config.add_option(\n 'dump_field',\n doc='the default dump field',\n default='upload_file_minidump'\n )\n required_config.add_option(\n 'forbidden_keys',\n doc='a comma delimited list of keys to not allowed in the processed '\n 'crash',\n default='url, email, user_id, exploitability',\n from_string_converter=lambda x: [i.strip() for i in x.split(',')]\n )\n required_config.add_option(\n 'name_branch_base',\n doc='the directory base name to use for the named radix tree storage',\n default='name'\n )\n\n def __init__(self, *args, **kwargs):\n super(FSRadixTreeStorage, self).__init__(*args, **kwargs)\n try:\n with using_umask(self.config.umask):\n os.makedirs(self.config.fs_root)\n except OSError:\n self.logger.info(\"didn't make directory: %s \" %\n self.config.fs_root)\n\n @staticmethod\n def _cleanup_empty_dirs(base, leaf):\n parts = leaf.split(os.sep)\n\n while parts:\n cur = os.sep.join([base] + parts)\n parts.pop()\n\n try:\n os.rmdir(cur)\n except OSError:\n # this directory isn't empty, so we can stop cleanup\n break\n\n def _get_dump_file_name(self, crash_id, dump_name):\n if dump_name == self.config.dump_field or not dump_name:\n return crash_id + self.config.dump_file_suffix\n else:\n return \"%s.%s%s\" % (crash_id,\n dump_name,\n self.config.dump_file_suffix)\n\n @staticmethod\n def _get_radix(crash_id):\n return [crash_id[i * 2:(i + 1) * 2]\n for i in range(depthFromOoid(crash_id))]\n\n def _get_base(self, crash_id):\n date = dateFromOoid(crash_id)\n if not date:\n date = utc_now()\n date_formatted = \"%4d%02d%02d\" % (date.year, date.month, date.day)\n return [self.config.fs_root, date_formatted]\n\n def _get_radixed_parent_directory(self, crash_id):\n return os.sep.join(self._get_base(crash_id) +\n [self.config.name_branch_base] +\n self._get_radix(crash_id) +\n [crash_id])\n\n def _dump_names_from_paths(self, pathnames):\n dump_names = []\n for a_pathname in pathnames:\n base_name = os.path.basename(a_pathname)\n dump_name = base_name[37:-len(self.config.dump_file_suffix)]\n if not dump_name:\n dump_name = self.config.dump_field\n dump_names.append(dump_name)\n return dump_names\n\n def _save_files(self, crash_id, files):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n\n with using_umask(self.config.umask):\n try:\n os.makedirs(parent_dir)\n except OSError:\n # probably already created, ignore\n pass\n #self.logger.debug(\"could not make directory: %s\" %\n #self.config.fs_root)\n\n for fn, contents in files.iteritems():\n with open(os.sep.join([parent_dir, fn]), 'wb') as f:\n f.write(contents)\n\n def save_processed(self, processed_crash):\n crash_id = processed_crash['uuid']\n processed_crash = 
processed_crash.copy()\n f = StringIO()\n for k in self.config.forbidden_keys:\n if k in processed_crash:\n del processed_crash[k]\n with closing(gzip.GzipFile(mode='wb', fileobj=f)) as fz:\n json.dump(processed_crash, fz, default=self.json_default)\n self._save_files(crash_id, {\n crash_id + self.config.jsonz_file_suffix: f.getvalue()\n })\n\n def save_raw_crash(self, raw_crash, dumps, crash_id):\n files = {\n crash_id + self.config.json_file_suffix: json.dumps(raw_crash)\n }\n files.update(dict((self._get_dump_file_name(crash_id, fn), dump)\n for fn, dump in dumps.iteritems()))\n self._save_files(crash_id, files)\n\n def get_raw_crash(self, crash_id):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n with open(os.sep.join([parent_dir,\n crash_id + self.config.json_file_suffix]),\n 'r') as f:\n return json.load(f, object_hook=DotDict)\n\n def get_raw_dump(self, crash_id, name=None):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n with open(os.sep.join([parent_dir,\n self._get_dump_file_name(crash_id, name)]),\n 'rb') as f:\n return f.read()\n\n def get_raw_dumps_as_files(self, crash_id):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n dump_paths = [os.sep.join([parent_dir, dump_file_name])\n for dump_file_name in os.listdir(parent_dir)\n if dump_file_name.startswith(crash_id) and\n dump_file_name.endswith(self.config.dump_file_suffix)]\n return DotDict(zip(self._dump_names_from_paths(dump_paths),\n dump_paths))\n\n def get_raw_dumps(self, crash_id):\n def read_with(fn):\n with open(fn) as f:\n return f.read()\n return DotDict((k, read_with(v))\n for k, v\n in self.get_raw_dumps_as_files(crash_id).iteritems())\n\n def get_processed(self, crash_id):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n with closing(gzip.GzipFile(os.sep.join([\n parent_dir,\n crash_id + self.config.jsonz_file_suffix]),\n 'rb')) as f:\n return json.load(f, object_hook=DotDict)\n\n def remove(self, crash_id):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n shutil.rmtree(parent_dir)\n\n @staticmethod\n def json_default(obj):\n if isinstance(obj, datetime.datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n raise TypeError\n\n\nclass FSLegacyRadixTreeStorage(FSRadixTreeStorage):\n \"\"\"\n The legacy radix tree storage implements a variant of the radix tree\n storage, designed to be backwards-compatible with the old filesystem\n module.\n\n This filesystem storage does not create a subdirectory with the crash ID\n in the radix tree to store crashes -- instead, it just stores it in the\n final radix part.\n \"\"\"\n def _get_radixed_parent_directory(self, crash_id):\n return os.sep.join(self._get_base(crash_id) +\n [self.config.name_branch_base] +\n self._get_radix(crash_id))\n\n\n def remove(self, crash_id):\n parent_dir = self._get_radixed_parent_directory(crash_id)\n if not os.path.exists(parent_dir):\n raise CrashIDNotFound\n\n removal_candidates = [os.sep.join([parent_dir,\n crash_id + '.json'])] + \\\n list(self.get_raw_dumps_as_files(crash_id)\n .values())\n\n for cand in removal_candidates:\n try:\n os.unlink(cand)\n except OSError:\n self.config.logger.error(\"could not delete: %s\", cand,\n exc_info=True)\n\nclass 
FSDatedRadixTreeStorage(FSRadixTreeStorage):\n \"\"\"\n This class implements dated radix tree storage -- it enables for traversing\n a radix tree using an hour/minute prefix. It allows searching for new\n crashes, but doesn't store processed crashes.\n\n It supplements the basic radix tree storage with indexing by date. It takes\n the current hour, minute and second and stores items in the following\n scheme::\n\n root/yyyymmdd/date_branch_base/hour/minute_(minute_slice)/crash_id\n\n minute_slice is computed by taking the second of the current timestamp\n and floor dividing by minute_slice_interval, e.g. a minute slice of 4\n provides slots from 0..14.\n\n This is a symlink to the items stored in the base radix tree storage.\n Additionally, a symlink is created in the base radix tree directory called\n ``date_root` which links to the ``minute_(minute_slice)`` folder.\n\n This storage class is suitable for use as raw crash storage, as it supports\n the ``new_crashes`` method.\n \"\"\"\n\n required_config = Namespace()\n required_config.add_option(\n 'date_branch_base',\n doc='the directory base name to use for the dated radix tree storage',\n default='date'\n )\n required_config.add_option(\n 'minute_slice_interval',\n doc='how finely to slice minutes into slots, e.g. 4 means every 4 '\n 'seconds a new slot will be allocated',\n default=4\n )\n\n # This is just a constant for len(self._current_slot()).\n SLOT_DEPTH = 2\n DIR_DEPTH = 2\n\n def _get_date_root_name(self, crash_id):\n return 'date_root'\n\n def _get_dump_file_name(self, crash_id, dump_name):\n if dump_name == self.config.dump_field or dump_name is None:\n return crash_id + self.config.dump_file_suffix\n else:\n return \"%s.%s%s\" % (crash_id,\n dump_name,\n self.config.dump_file_suffix)\n\n def _get_dated_parent_directory(self, crash_id, slot):\n return os.sep.join(self._get_base(crash_id) +\n [self.config.date_branch_base] + slot)\n\n def _current_slot(self):\n now = utc_now()\n return [\"%02d\" % now.hour,\n \"%02d_%02d\" % (now.minute,\n now.second //\n self.config.minute_slice_interval)]\n\n def _create_name_to_date_symlink(self, crash_id, slot):\n \"\"\"we traverse the path back up from date/slot... to make a link:\n src: \"name\"/radix.../crash_id (or \"name\"/radix... 
for legacy mode)\n dest: \"date\"/slot.../crash_id\"\"\"\n radixed_parent_dir = self._get_radixed_parent_directory(crash_id)\n\n root = os.sep.join([os.path.pardir] * (self.SLOT_DEPTH + 1))\n os.symlink(os.sep.join([root, self.config.name_branch_base] +\n self._get_radix(crash_id) +\n [crash_id]),\n os.sep.join([self._get_dated_parent_directory(crash_id,\n slot),\n crash_id]))\n\n def _create_date_to_name_symlink(self, crash_id, slot):\n \"\"\"the path is something like name/radix.../crash_id, so what we do is\n add 2 to the directories to go up _dir_depth + len(radix).\n we make a link:\n src: \"date\"/slot...\n dest: \"name\"/radix.../crash_id/date_root_name\"\"\"\n radixed_parent_dir = self._get_radixed_parent_directory(crash_id)\n\n root = os.sep.join([os.path.pardir] *\n (len(self._get_radix(crash_id)) + self.DIR_DEPTH))\n os.symlink(os.sep.join([root, self.config.date_branch_base] + slot),\n os.sep.join([radixed_parent_dir,\n self._get_date_root_name(crash_id)]))\n\n def save_raw_crash(self, raw_crash, dumps, crash_id):\n super(FSDatedRadixTreeStorage, self).save_raw_crash(raw_crash,\n dumps, crash_id)\n\n slot = self._current_slot()\n parent_dir = self._get_dated_parent_directory(crash_id, slot)\n\n try:\n os.makedirs(parent_dir)\n except OSError:\n # probably already created, ignore\n pass\n #self.logger.debug(\"could not make directory: %s\" %\n #parent_dir)\n\n with using_umask(self.config.umask):\n self._create_name_to_date_symlink(crash_id, slot)\n self._create_date_to_name_symlink(crash_id, slot)\n\n def remove(self, crash_id):\n dated_path = os.path.realpath(\n os.sep.join([self._get_radixed_parent_directory(crash_id),\n self._get_date_root_name(crash_id)]))\n\n try:\n # We can just unlink the symlink and later new_crashes will clean\n # up for us.\n os.unlink(os.sep.join([dated_path, crash_id]))\n except OSError:\n pass # we might be trying to remove a visited crash and that's\n # okay\n\n # Now we actually remove the crash.\n super(FSDatedRadixTreeStorage, self).remove(crash_id)\n\n def _visit_minute_slot(self, minute_slot_base):\n for crash_id in os.listdir(minute_slot_base):\n namedir = os.sep.join([minute_slot_base, crash_id])\n st_result = os.lstat(namedir)\n\n if stat.S_ISLNK(st_result.st_mode):\n # This is a link, so we can dereference it to find\n # crashes.\n if os.path.isfile(\n os.sep.join([namedir,\n crash_id +\n self.config.json_file_suffix])):\n date_root_path = os.sep.join([\n namedir,\n self._get_date_root_name(crash_id)\n ])\n yield crash_id\n\n try:\n os.unlink(date_root_path)\n except OSError as e:\n self.logger.error(\"could not find a date root in \"\n \"%s; is crash corrupt?\",\n namedir,\n exc_info=True)\n\n os.unlink(namedir)\n\n def new_crashes(self):\n \"\"\"\n The ``new_crashes`` method returns a generator that visits all new\n crashes like so:\n\n * Traverse the date root to find all crashes.\n\n * If we find a symlink in a slot, then we dereference the link and\n check if the directory has crash data.\n\n * if the directory does, then we remove the symlink in the slot,\n clean up the parent directories if they're empty and then yield\n the crash_id.\n \"\"\"\n current_slot = self._current_slot()\n\n date = utc_now()\n current_date = \"%4d%02d%02d\" % (date.year, date.month, date.day)\n\n dates = os.listdir(self.config.fs_root)\n for date in dates:\n dated_base = os.sep.join([self.config.fs_root, date,\n self.config.date_branch_base])\n\n try:\n hour_slots = os.listdir(dated_base)\n except OSError:\n # it is okay that the date root doesn't exist - 
skip on to\n # the next date\n #self.logger.info(\"date root for %s doesn't exist\" % date)\n continue\n\n for hour_slot in hour_slots:\n skip_dir = False\n hour_slot_base = os.sep.join([dated_base, hour_slot])\n for minute_slot in os.listdir(hour_slot_base):\n minute_slot_base = os.sep.join([hour_slot_base,\n minute_slot])\n slot = [hour_slot, minute_slot]\n\n if slot >= current_slot and date >= current_date:\n # the slot is currently being used, we want to skip it\n # for now\n self.logger.info(\"not processing slot: %s/%s\" %\n tuple(slot))\n skip_dir = True\n continue\n\n for x in self._visit_minute_slot(minute_slot_base):\n yield x\n\n try:\n # We've finished processing the slot, so we can remove\n # it.\n os.rmdir(minute_slot_base)\n except OSError as e:\n self.logger.error(\"could not fully remove directory: \"\n \"%s; are there more crashes in it?\",\n minute_slot_base,\n exc_info=True)\n\n if not skip_dir and hour_slot < current_slot[0]:\n try:\n # If the current slot is greater than the hour slot\n # we're processing, then we can conclude the directory\n # is safe to remove.\n os.rmdir(hour_slot_base)\n except OSError as e:\n self.logger.error(\"could not fully remove directory: \"\n \"%s; are there more crashes in it?\",\n hour_slot_base,\n exc_info=True)\n\n\nclass FSLegacyDatedRadixTreeStorage(FSDatedRadixTreeStorage,\n FSLegacyRadixTreeStorage):\n \"\"\"\n This legacy radix tree storage implements a backwards-compatible with the\n old filesystem storage by setting the symlinks up correctly.\n\n The rationale for creating a diamond structure for multiple inheritance is\n two-fold:\n\n * The implementation of ``_get_radixed_parent_directory`` is required from\n ``FSLegacyRadixTreeStorage`` and ``FSDatedRadixTreeStorage`` requires\n the behavior of the implementation from ``FSLegacyRadixTreeStorage`` to\n function correctly.\n\n * The implementation of ``remove`` is also required from\n ``FSDatedRadixTreeStorage``, and the order is dependent as it requires\n the MRO to resolve ``remove`` from the ``FSDatedRadixTreeStorage``\n first, over ``FSLegacyRadixTreeStorage``.\n \"\"\"\n DIR_DEPTH = 1\n\n def _get_date_root_name(self, crash_id):\n return crash_id\n\n def _create_name_to_date_symlink(self, crash_id, slot):\n root = os.sep.join([os.path.pardir] * (self.SLOT_DEPTH + 1))\n os.symlink(os.sep.join([root, self.config.name_branch_base] +\n self._get_radix(crash_id)),\n os.sep.join([self._get_dated_parent_directory(crash_id,\n slot),\n crash_id]))\n\n def _visit_minute_slot(self, minute_slot_base):\n for crash_id_or_webhead in os.listdir(minute_slot_base):\n namedir = os.sep.join([minute_slot_base, crash_id_or_webhead])\n st_result = os.lstat(namedir)\n\n if stat.S_ISLNK(st_result.st_mode):\n crash_id = crash_id_or_webhead\n\n # This is a link, so we can dereference it to find\n # crashes.\n if os.path.isfile(\n os.sep.join([namedir,\n crash_id +\n self.config.json_file_suffix])):\n date_root_path = os.sep.join([\n namedir,\n self._get_date_root_name(crash_id)\n ])\n\n yield crash_id\n\n try:\n os.unlink(date_root_path)\n except OSError as e:\n self.logger.error(\"could not find a date root in \"\n \"%s; is crash corrupt?\",\n date_root_path,\n exc_info=True)\n\n os.unlink(namedir)\n\n elif stat.S_ISDIR(st_result.st_mode):\n webhead_slot = crash_id_or_webhead\n webhead_slot_base = os.sep.join([minute_slot_base,\n webhead_slot])\n\n # This is actually a webhead slot, but we can visit it as if\n # it was a minute slot.\n for x in self._visit_minute_slot(webhead_slot_base):\n yield 
x\n\n try:\n os.rmdir(webhead_slot_base)\n except OSError as e:\n self.logger.error(\"could not fully remove directory: \"\n \"%s; are there more crashes in it?\",\n webhead_slot_base,\n exc_info=True)\n else:\n self.logger.critical(\"unknown file %s found\", namedir)\n", "sub_path": "socorro/external/fs/crashstorage.py", "file_name": "crashstorage.py", "file_ext": "py", "file_size_in_byte": 23579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.umask", "line_number": 29, "usage_type": "call"}, {"api_name": "os.umask", "line_number": 31, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 27, "usage_type": "name"}, {"api_name": "socorro.external.crashstorage_base.CrashStorageBase", "line_number": 34, "usage_type": "name"}, {"api_name": "configman.Namespace", "line_number": 57, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 109, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 123, "usage_type": "call"}, {"api_name": "socorro.lib.ooid.depthFromOoid", "line_number": 139, "usage_type": "call"}, {"api_name": "socorro.lib.ooid.dateFromOoid", "line_number": 142, "usage_type": "call"}, {"api_name": "socorro.lib.datetimeutil.utc_now", "line_number": 144, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 169, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 177, "usage_type": "attribute"}, {"api_name": "StringIO.StringIO", "line_number": 183, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 187, "usage_type": "call"}, {"api_name": "gzip.GzipFile", "line_number": 187, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 188, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 204, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 205, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 208, "usage_type": "call"}, {"api_name": "socorro.lib.util.DotDict", "line_number": 208, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 213, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 214, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 214, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "attribute"}, {"api_name": 
"socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 222, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 224, "usage_type": "call"}, {"api_name": "socorro.lib.util.DotDict", "line_number": 227, "usage_type": "call"}, {"api_name": "socorro.lib.util.DotDict", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 241, "usage_type": "name"}, {"api_name": "contextlib.closing", "line_number": 242, "usage_type": "call"}, {"api_name": "gzip.GzipFile", "line_number": 242, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 242, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 242, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 246, "usage_type": "call"}, {"api_name": "socorro.lib.util.DotDict", "line_number": 246, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 251, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 252, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 256, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 272, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 272, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "socorro.external.crashstorage_base.CrashIDNotFound", "line_number": 280, "usage_type": "name"}, {"api_name": "os.sep.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 282, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 289, "usage_type": "call"}, {"api_name": "configman.Namespace", "line_number": 318, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 347, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 347, "usage_type": "attribute"}, {"api_name": "socorro.lib.datetimeutil.utc_now", "line_number": 351, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 363, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 363, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 363, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 364, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 364, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 364, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 367, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 367, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 379, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 379, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 379, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 381, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 381, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 381, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 382, 
"usage_type": "call"}, {"api_name": "os.sep", "line_number": 382, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 406, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 406, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 412, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 412, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 412, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 421, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 422, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 422, "usage_type": "attribute"}, {"api_name": "os.lstat", "line_number": 423, "usage_type": "call"}, {"api_name": "stat.S_ISLNK", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 428, "usage_type": "call"}, {"api_name": "os.path", "line_number": 428, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 429, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 429, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 432, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 432, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 439, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 446, "usage_type": "call"}, {"api_name": "socorro.lib.datetimeutil.utc_now", "line_number": 464, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 467, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 469, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 469, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 473, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 482, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 482, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 483, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 484, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 484, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 502, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 514, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 547, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 547, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 547, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 548, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 548, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 548, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 550, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 550, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 555, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 556, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 556, "usage_type": "attribute"}, {"api_name": "os.lstat", "line_number": 557, "usage_type": "call"}, {"api_name": "stat.S_ISLNK", "line_number": 559, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 564, "usage_type": "call"}, {"api_name": "os.path", "line_number": 564, "usage_type": "attribute"}, {"api_name": "os.sep.join", 
"line_number": 565, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 565, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 568, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 568, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 576, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 583, "usage_type": "call"}, {"api_name": "stat.S_ISDIR", "line_number": 585, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 587, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 587, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 596, "usage_type": "call"}]} +{"seq_id": "624832984", "text": "import sys\nimport argparse\n\nclass NotebookLoader(object):\n\n def __init__(self):\n self.ip = get_ipython()\n\n def initialize(self):\n self.ip.magic('load_ext noworkflow')\n import noworkflow.now.ipython as nip\n script_locals = sys._getframe(1).f_locals\n nip.init(script_locals[\"now_path\"])\n\n def show_result(self, result, fformat=\"png\"):\n return self.ip.run_cell_magic(\"dot\", \"--format \" + fformat, result)\n\n\ndef check_color(value):\n if value.upper() in (\"YW\", \"NW\"):\n return value.upper()\n if len(value.split(',')) == 3:\n return value\n raise argparse.ArgumentTypeError(\"%s is an invalid color/schema\" % value)\n\nclass CmdLineLoader(object):\n\n def initialize(self):\n parser = argparse.ArgumentParser(description=\"Create lineage graph \"\n \"for variable or file\")\n parser.add_argument(\"name\", type=str, nargs=\"?\",\n default=\"run/data/DRT240/DRT240_11000eV_002.img\",\n help=\"variable name or filename\",)\n parser.add_argument(\"-c\", \"--color\", type=check_color, default=\"NW\",\n help=\"Color schema. Possible options: NW, YW or \"\n \"three color codes separated by comma, where the \"\n \"first one represents calls; the second one \"\n \"represents variables; and the third one \"\n \"represents files. Ex: '#3A85B9,#FFFFCC,#AAAAAAA'\")\n parser.add_argument(\"-d\", \"--direction\", type=str, default=\"BT\",\n choices=[\"BT\", \"RL\", \"TB\", \"LR\"],\n help=\"Graphviz rankdir. 
Default=BT\")\n\n parser.add_argument(\"-e\", \"--linebreak-equal\", action='store_true',\n help=\"Show values on a new line\")\n parser.add_argument(\"--dir\", help=\"project path\", default=\"..\")\n parser.add_argument(\"-o\", \"--output\", type=str,\n default=\"nw-lineage.dot\",\n help=\"Output dot file\")\n\n args = parser.parse_args()\n script_locals = sys._getframe(1).f_locals\n script_locals[\"var_name\"] = args.name\n script_locals[\"color_schema\"] = args.color\n script_locals[\"direction\"] = args.direction\n script_locals[\"replace_equal\"] = not args.linebreak_equal\n script_locals[\"now_path\"] = args.dir\n script_locals[\"output_file\"] = args.output\n from noworkflow.now.persistence import persistence_config\n persistence_config.connect_existing(args.dir)\n\n def show_result(self, result, fformat=\"png\"):\n pass\n\n\ndef Loader(is_notebook):\n if is_notebook:\n return NotebookLoader()\n return CmdLineLoader()\n", "sub_path": "simulate_data_collection/nw/loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 2808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys._getframe", "line_number": 12, "usage_type": "call"}, {"api_name": "noworkflow.now.ipython.init", "line_number": 13, "usage_type": "call"}, {"api_name": "noworkflow.now.ipython", "line_number": 13, "usage_type": "name"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 24, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "sys._getframe", "line_number": 52, "usage_type": "call"}, {"api_name": "noworkflow.now.persistence.persistence_config.connect_existing", "line_number": 60, "usage_type": "call"}, {"api_name": "noworkflow.now.persistence.persistence_config", "line_number": 60, "usage_type": "name"}, {"api_name": "{'nip': 'noworkflow.now.ipython'}", "line_number": 68, "usage_type": "call"}, {"api_name": "{'persistence_config': 'noworkflow.now.persistence.persistence_config'}", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "629884153", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport autoslug.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Connection',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('type', models.CharField(max_length=60, choices=[(b'equal', 'EQUAL'), (b'child', 'CHILD')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('start', models.DateTimeField(verbose_name='start date')),\n ('end', models.DateTimeField(null=True, verbose_name='end date', blank=True)),\n ('title', models.CharField(max_length=255, verbose_name='title')),\n ('description', models.TextField(verbose_name='description')),\n ('image', models.CharField(max_length=255, verbose_name='picture URL')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200, verbose_name='name')),\n ('photo', models.ImageField(upload_to=b'/home/uarpalex/Workspace/persony/persony/public/persons', 
max_length=255, verbose_name='photo')),\n ('info', models.TextField(verbose_name='information')),\n ('facebook', models.CharField(max_length=255, null=True, verbose_name='facebook', blank=True)),\n ('twitter', models.CharField(max_length=255, null=True, verbose_name='twitter', blank=True)),\n ('twitter_view', models.CharField(max_length=255, null=True, verbose_name='twitter view id', blank=True)),\n ('featured', models.IntegerField(blank=True, null=True, choices=[(0, 'not featured'), (1, 'featured')])),\n ('slug', autoslug.fields.AutoSlugField(editable=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='event',\n name='person',\n field=models.ManyToManyField(related_name='events', verbose_name='person', to='personyapp.Person'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='connection',\n name='object',\n field=models.ForeignKey(related_name='connections_objects', to='personyapp.Person', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='connection',\n name='subject',\n field=models.ForeignKey(related_name='connections_subjects', to='personyapp.Person', null=True),\n preserve_default=True,\n ),\n ]\n", "sub_path": "personyapp/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 3228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 36, 
"usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "autoslug.fields.fields.AutoSlugField", "line_number": 49, "usage_type": "call"}, {"api_name": "autoslug.fields.fields", "line_number": 49, "usage_type": "attribute"}, {"api_name": "autoslug.fields", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "654265430", "text": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport math\n\nimport numpy as np\nimport scipy.integrate\nimport sympy\n\nfrom scipy.special import sph_harm, genlaguerre\nfrom scipy.misc import factorial\n\nfrom .math import norm2\nfrom .symbolic import generate_hermite\n\nclass HydrogenLikeOrbital(object):\n def __init__(self, z, n, l, m):\n assert n > 0\n assert l >= 0\n assert l < n\n assert abs(m) <= l\n self.n = n\n self.l = l\n self.m = m\n self.z = z\n\n def evaluator(self):\n evaluator = lambda r, theta, phi: \\\n self.radial_func()(r) \\\n * self.sph_harm()(theta, phi)\n return 
evaluator\n\n def radial_func(self):\n evaluator = lambda r: \\\n self.normalization() * self.rho(r)**self.l \\\n * self.laguerre()(self.rho(r)) \\\n * np.exp(-.5*self.rho(r))\n return evaluator\n\n def radial_probability_density(self):\n return lambda r: self.radial_func()(r)**2\n\n def rho(self, r):\n return 2 * self.z * r / self.n\n\n def sph_harm(self):\n return lambda theta, phi: sph_harm(self.m, self.l, theta, phi)\n\n def laguerre(self):\n return genlaguerre(self.n-self.l-1, 2*self.l+1)\n\n def normalization(self):\n return np.sqrt((2*self.z/self.n)**3 * math.factorial(self.n-self.l-1)/(2*self.n*math.factorial(self.n+self.l)))\n\n\nclass SlaterTypeOrbital(object):\n def __init__(self, alpha, n, l, m):\n self.alpha = alpha\n self.n = n\n self.l = l\n self.m = m\n\n def evaluator(self):\n evaluator = lambda r, theta, phi: \\\n 1/np.sqrt(math.factorial(2*self.n)) \\\n * (2*self.alpha)**(self.n+.5) * r**(self.n-1) \\\n * np.exp(-self.alpha*r) \\\n * self.sph_harm()(theta, phi)\n return evaluator\n\n def sph_harm(self):\n return lambda theta, phi: sph_harm(self.m, self.l, theta, phi)\n\n\nclass GaussianTypeOrbital(object):\n def __init__(self, alpha, n, l, m, r0=None):\n if r0 is None:\n r0 = np.array([0, 0, 0])\n self.alpha = alpha\n self.n = n\n self.l = l\n self.m = m\n self.r0 = r0\n\n def evaluator(self):\n evaluator = lambda r, theta, phi: \\\n self.normalization() * (r-self.r0)**(self.n-1) * np.exp(-self.alpha*(r-self.r0)**2) \\\n * self.sph_harm()(theta, phi)\n return evaluator\n\n def sph_harm(self):\n return lambda theta, phi: sph_harm(self.m, self.l, theta, phi)\n\n def normalization(self):\n return np.sqrt(2**(2*self.n+1.5)/math.factorial(2*self.n-1)*np.sqrt(np.pi)) * self.alpha**(2*self.n+1)/4\n\n def normalization_symbolic(self):\n return (sympy.sqrt(sympy.Integer(2)**(2*self.n+1.5)/sympy.factorial(2*self.n-1)*sympy.sqrt(sympy.pi))\n * sympy.Rational(self.alpha)**(2*self.n+1)/4)\n\n def radial_func_symbolic(self):\n r, theta, phi = sympy.symbol(\"r theta phi\")\n return self.normalization_symbolic() * r**(self.n-1) * sympy.exp(-self.alpha*r**2)\n\n def __mul__(self, other):\n if isinstance(other, GaussianTypeOrbital):\n sigma2_self = self.alpha/2\n sigma2_other = other.alpha / 2\n r_new = (other.alpha * self.r0 + self.alpha * other.r0) / (self.alpha + other.alpha)\n\n else:\n raise Exception(\"must mul by another GTO\")\n\n\nclass CartesianGaussianTypeOrbital(object):\n def __init__(self, d, alpha, r0, i, j, k):\n self.d = d\n self.alpha = alpha\n self.i = i\n self.j = j\n self.k = k\n self.r0 = r0\n\n @property\n def p(self):\n return (self.i, self.j, self.k,)\n\n def symbolic(self, x, y, z):\n evaluator = (\n self.normalization()\n * (x - self.r0[0])**self.i\n * (y - self.r0[1])**self.j\n * (z - self.r0[2])**self.k\n * sympy.exp(-self.alpha * ((self.r0[0]-x)**2 + (self.r0[1]-y)**2 + (self.r0[2]-z)**2))\n )\n return evaluator\n\n def evaluator(self):\n x_, y_, z_ = sympy.symbols('x y z')\n symbolic = self.symbolic(x_, y_, z_)\n evaluator = lambda x, y, z: symbolic.subs({x_: x, y_: y, z_: z})\n return evaluator\n\n def normalization(self):\n value = (2*self.alpha/np.pi)**.75 * np.sqrt((8*self.alpha)**(self.i+self.j+self.k)*factorial(self.i)*factorial(self.j)*factorial(self.k) / (factorial(2*self.i) * factorial(2*self.j) * factorial(2*self.k)))\n return value\n\n def expectation_value(self, operator):\n from sympy import oo\n x, y, z = sympy.symbols('x y z')\n applied = operator.apply(self, x, y, z)\n integrand = self.symbolic(x, y, z) * applied\n integrated = 
sympy.integrate(integrand, (x, -oo, oo), (y, -oo, oo), (z, -oo, oo))\n return integrated.evalf()\n\n def overlap(self, operator, other_orbital):\n from sympy import oo\n x, y, z = sympy.symbols('x y z')\n applied = operator.apply(self, x, y, z)\n integrand = other_orbital.symbolic(x, y, z) * applied\n integrated = sympy.integrate(integrand, (x, -oo, oo), (y, -oo, oo), (z, -oo, oo))\n return integrated.evalf()\n\n\nclass LaplacianOperator(object):\n def __init__(self):\n pass\n\n def apply(self, orbital, x, y, z):\n from sympy.physics.vector import divergence, gradient, ReferenceFrame\n R = ReferenceFrame('R')\n symbolic = orbital.symbolic(R[0], R[1], R[2])\n result = divergence(gradient(symbolic, R), R)\n result = result.subs({R[0]: x, R[1]: y, R[2]: z})\n return result\n\n\nclass NoopOperator(object):\n def apply(self, orbital, x, y, z):\n return orbital.symbolic(x, y, z)\n\n\nclass BasisSet(object):\n def __init__(self):\n self.orbitals = list()\n self.overlaps = dict()\n\n def add(self, basis_orbital):\n self.orbitals.append(basis_orbital)\n\n def precompute_overlaps(self):\n # import ipdb; ipdb.set_trace()\n for orbital1 in self.orbitals:\n lambda1 = orbital1.evaluator()\n for orbital2 in self.orbitals:\n lambda2 = orbital2.evaluator()\n for orbital3 in self.orbitals:\n lambda3 = orbital3.evaluator()\n for orbital4 in self.orbitals:\n lambda4 = orbital4.evaluator()\n integrand = lambda x1, y1, z1, x2, y2, z2: (\n lambda1(x1, y1, z1) * lambda2(x1, y1, z1)\n / np.sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)\n * lambda3(x2, y2, z2) * lambda4(x2, y2, z2)\n )\n result, err = scipy.integrate.nquad(\n integrand,\n [[-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf],\n [-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]\n )\n self.orbitals[orbital1, orbital2, orbital3, orbital4] = result\n\n\nclass PrimitiveBra(object):\n def __init__(self, abar, bbar, a, b, p, ap, bp, pp):\n \"\"\"\n :param abar:\n :param bbar:\n :param a:\n :param b:\n :param p:\n :param ap:\n :param bp:\n :param pp:\n :type abar: array[3]\n :type bbar: array[3]\n :type a: CartesianGaussianTypeOrbital\n :type b: CartesianGaussianTypeOrbital\n :type p: array[3]\n :type ap: float\n :type bp: float\n type pp: float\n :return:\n \"\"\"\n self.abar = abar\n self.bbar = bbar\n self.a = a\n self.b = b\n self.p = p\n self.ap = ap\n self.bp = bp\n self.pp = pp\n\n def evaluate(self, r):\n if self.abar == 0 and self.bbar == 0 and self.ap == 0 and self.bp == 0 and self.pp == 0:\n result = sympy.exp(-self.a.alpha * norm2(r - self.a.r0) - self.b.alpha * norm2(r - self.b.r0))\n xi = self.a.alpha + self.b.alpha\n P = (self.a.alpha * self.a.r0 + self.b.alpha * self.b.r0) / xi\n for ind in range(3):\n result *= (\n (r[ind] - self.a.r0[ind])**self.a.p[ind] * (r[ind] - self.b.r0[ind])**self.b.p[ind]\n * xi**(self.p[ind]/2)\n * generate_hermite(self.p[ind], sympy.sqrt(xi) * (r[ind] - P[ind]))\n )\n else:\n result = PrimitiveBra(0, 0, self.a, self.b, self.p, 0, 0, 0).evaluate(r)\n for ind in range(3):\n for counter in range(self.abar[ind]):\n result = sympy.diff(result, self.a.r0[ind])\n for counter in range(self.bbar[ind]):\n result = sympy.diff(result, self.b.r0[ind])\n result *= self.a.d * self.b.d * (2*self.a.alpha)**self.ap * (2*self.b.alpha)**self.bp\n return result", "sub_path": "orbitals.py", "file_name": "orbitals.py", "file_ext": "py", "file_size_in_byte": 8921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.exp", "line_number": 36, 
"usage_type": "call"}, {"api_name": "scipy.special.sph_harm", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.special.genlaguerre", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 52, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.special.sph_harm", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.special.sph_harm", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 94, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sympy.sqrt", "line_number": 97, "usage_type": "call"}, {"api_name": "sympy.Integer", "line_number": 97, "usage_type": "call"}, {"api_name": "sympy.factorial", "line_number": 97, "usage_type": "call"}, {"api_name": "sympy.pi", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sympy.Rational", "line_number": 98, "usage_type": "call"}, {"api_name": "sympy.symbol", "line_number": 101, "usage_type": "call"}, {"api_name": "sympy.exp", "line_number": 102, "usage_type": "call"}, {"api_name": "sympy.exp", "line_number": 133, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 138, "usage_type": "call"}, {"api_name": "symbolic.subs", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.misc.factorial", "line_number": 144, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 149, "usage_type": "call"}, {"api_name": "sympy.integrate", "line_number": 152, "usage_type": "call"}, {"api_name": "sympy.oo", "line_number": 152, "usage_type": "name"}, {"api_name": "sympy.symbols", "line_number": 157, "usage_type": "call"}, {"api_name": "sympy.integrate", "line_number": 160, "usage_type": "call"}, {"api_name": "sympy.oo", "line_number": 160, "usage_type": "name"}, {"api_name": "sympy.physics.vector.ReferenceFrame", "line_number": 170, "usage_type": "call"}, {"api_name": "sympy.physics.vector.divergence", "line_number": 172, "usage_type": "call"}, {"api_name": "sympy.physics.vector.gradient", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 202, "usage_type": "call"}, {"api_name": "scipy.integrate.integrate.nquad", "line_number": 205, "usage_type": "call"}, {"api_name": "scipy.integrate.integrate", "line_number": 205, "usage_type": "attribute"}, {"api_name": "scipy.integrate", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 207, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 208, "usage_type": "attribute"}, {"api_name": "sympy.exp", "line_number": 245, "usage_type": "call"}, {"api_name": "math.norm2", "line_number": 245, "usage_type": "call"}, {"api_name": "symbolic.generate_hermite", "line_number": 252, "usage_type": "call"}, {"api_name": "sympy.sqrt", "line_number": 252, "usage_type": "call"}, {"api_name": "sympy.diff", "line_number": 258, "usage_type": "call"}, {"api_name": 
"sympy.diff", "line_number": 260, "usage_type": "call"}]} +{"seq_id": "468402333", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404#, HttpResponseRedirect\nfrom django.core.paginator import Paginator, EmptyPage\n#from django.contrib.auth import login, authenticate\n#from django.contrib.auth.models import User\nfrom django.views.decorators.http import require_GET#, require_POST\nimport qa.models as m\n\n# Create your views here.\ndef test(request, *args, **kwargs):\n return HttpResponse(\"OK\")\n\ndef paginate(request, qs):\n try:\n limit = int(request.GET.get('limit', 10))\n except ValueError:\n limit = 10\n if limit > 100:\n limit = 10\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n raise Http404\n paginator = Paginator(qs, limit)\n try:\n page = paginator.page(page)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n return paginator, page\n\n@require_GET\ndef main(request):\n quest = m.Question.objects.order_by('-added_at')\n paginator, page = paginate(request, quest)\n return render(request, 'main.html', {'paginator': paginator, 'page': page, 'questions': page.object_list})\n\n@require_GET\ndef popular(request):\n quest = m.Question.objects.order_by('-rating')\n paginator, page = paginate(request, quest)\n return render(request, 'popular.html', {'paginator': paginator, 'page': page, 'questions': page.object_list})\n\ndef question(request, id):\n quest = get_object_or_404(m.Question, id=id)\n try:\n answers = m.Answer.objects.filter(question=quest).all()\n except m.Answer.DoesNotExist:\n answers = []\n #a = Answer(question=quest, author=request.user)\n return render(request, 'question.html', {'quest': quest, 'answers': answers})\n\n", "sub_path": "ask/qa/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1744, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.http.HttpResponse", "line_number": 11, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 23, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 24, "usage_type": "call"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 27, "usage_type": "name"}, {"api_name": "qa.models.Question.objects.order_by", "line_number": 33, "usage_type": "call"}, {"api_name": "qa.models.Question", "line_number": 33, "usage_type": "attribute"}, {"api_name": "qa.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 31, "usage_type": "name"}, {"api_name": "qa.models.Question.objects.order_by", "line_number": 39, "usage_type": "call"}, {"api_name": "qa.models.Question", "line_number": 39, "usage_type": "attribute"}, {"api_name": "qa.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 44, "usage_type": "call"}, {"api_name": "qa.models.Question", "line_number": 44, "usage_type": "attribute"}, {"api_name": "qa.models", "line_number": 44, "usage_type": "name"}, {"api_name": "qa.models.Answer.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "qa.models.Answer", "line_number": 46, 
"usage_type": "attribute"}, {"api_name": "qa.models", "line_number": 46, "usage_type": "name"}, {"api_name": "qa.models.Answer", "line_number": 47, "usage_type": "attribute"}, {"api_name": "qa.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "251114003", "text": "import os, config, sqlalchemy_utils\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom contextlib import contextmanager\nfrom utils import development_only\nfrom models import Base\n\nif os.environ['FLASK_APP_SETTINGS'] == 'config.TestingConfig':\n main_db_url = config.Config.TEST_DATABASE_URL\n analytics_db_url = config.Config.TEST_ANALYTICS_DATABASE_URL\nelse:\n main_db_url = config.Config.DATABASE_URL\n analytics_db_url = config.Config.ANALYTICS_DATABASE_URL\n\nanalytics_db_conn = lambda: create_engine(analytics_db_url).connect()\n\nmain_db_engine = create_engine(main_db_url)\nSession = scoped_session(sessionmaker(bind=main_db_engine, autocommit=False, autoflush=True, expire_on_commit=False))\n\n@contextmanager\ndef manage_session():\n this_session = Session()\n try:\n yield this_session\n this_session.commit()\n except Exception as e:\n this_session.rollback()\n raise e\n finally:\n this_session.close()\n\n@development_only\ndef birth_schema(db_url):\n e = create_engine(db_url)\n Base.metadata.create_all(e)\n\n@development_only\ndef drop_and_recreate_db(db_url):\n if sqlalchemy_utils.functions.database_exists(db_url):\n sqlalchemy_utils.functions.drop_database(db_url)\n sqlalchemy_utils.functions.create_database(db_url)", "sub_path": "modules/tabdb.py", "file_name": "tabdb.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 18, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Base.metadata.create_all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Base.metadata", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Base", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.development_only", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy_utils.functions.database_exists", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.functions", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sqlalchemy_utils.functions.drop_database", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy_utils.functions", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sqlalchemy_utils.functions.create_database", "line_number": 41, "usage_type": 
"call"}, {"api_name": "sqlalchemy_utils.functions", "line_number": 41, "usage_type": "attribute"}, {"api_name": "utils.development_only", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "310126508", "text": "from django.utils.translation import ugettext_lazy as _\r\nfrom django import forms\r\nfrom django.forms import ModelForm, Textarea, TextInput, CheckboxInput, Select, RadioSelect\r\nfrom . import models_hlq\r\nfrom . import models_cra\r\nfrom . import models_ass\r\nfrom . import models_md\r\n\r\nfrom django.template.loader import render_to_string\r\n\r\nclass SelectWithPop(forms.Select):\r\n def render(self, name, *args, **kwargs):\r\n select_widget = super(SelectWithPop, self).render(name, *args, **kwargs)\r\n add_related_button = render_to_string(\"add_related_button.html\", {'field': name})\r\n \r\n return select_widget + add_related_button\r\n\r\nclass MultipleSelectWithPop(forms.SelectMultiple):\r\n def render(self, name, *args, **kwargs):\r\n multiple_select = super(MultipleSelectWithPop, self).render(name, *args, **kwargs)\r\n add_related_button = render_to_string(\"add_related_button.html\", {'field': name})\r\n \r\n return multiple_select + add_related_button\r\n\r\nclass AssessmentAppDocForm(ModelForm):\r\n class Meta:\r\n model = models_ass.AssessmentAppDoc\r\n fields = [\r\n 'docfile'\r\n ] \r\n \r\nclass QuestionnaireForm(ModelForm):\r\n class Meta:\r\n model = models_hlq.Questionnaire\r\n fields = [\r\n 'interviewer','interviewees','comment'\r\n ]\r\n \r\n widgets = {\r\n 'interviewees' : Textarea(attrs={'rows':4}), \r\n 'comment' : Textarea(attrs={'rows':4}), \r\n }\r\n\r\nclass QuestionnaireAppForm(ModelForm):\r\n class Meta:\r\n model = models_hlq.QuestionnaireApp\r\n fields = [\r\n 'app_name','missioncritical','migrationszenario','virtualized','web_based','standard_os','open_source_based',\r\n 'core_based_license','requirements','is_confidential','high_data_exchange','high_availability','network_latencies',\r\n 'ext_dependencies','auto_deployment','manuals_available','has_int_test'\r\n ]\r\n \r\n YESNO_CHOICES = ((True, _(\"Yes\")), (False, _(\"No\")))\r\n \r\n widgets = {\r\n 'missioncritical' : RadioSelect(choices=YESNO_CHOICES),\r\n 'virtualized' : RadioSelect(choices=YESNO_CHOICES),\r\n 'web_based' : RadioSelect(choices=YESNO_CHOICES),\r\n 'standard_os' : RadioSelect(choices=YESNO_CHOICES),\r\n 'open_source_based' : RadioSelect(choices=YESNO_CHOICES),\r\n 'core_based_license' : RadioSelect(choices=YESNO_CHOICES),\r\n 'requirements' : RadioSelect(choices=YESNO_CHOICES),\r\n 'is_confidential' : RadioSelect(choices=YESNO_CHOICES),\r\n 'high_data_exchange' : RadioSelect(choices=YESNO_CHOICES),\r\n 'high_availability' : RadioSelect(choices=YESNO_CHOICES),\r\n 'network_latencies' : RadioSelect(choices=YESNO_CHOICES),\r\n 'ext_dependencies' : RadioSelect(choices=YESNO_CHOICES),\r\n 'auto_deployment' : RadioSelect(choices=YESNO_CHOICES),\r\n 'manuals_available' : RadioSelect(choices=YESNO_CHOICES),\r\n 'has_int_test' : RadioSelect(choices=YESNO_CHOICES),\r\n 'comment' : Textarea(attrs={'rows':2}), \r\n }\r\n\r\nclass CloudReadinessForm(ModelForm):\r\n class Meta:\r\n model = models_cra.CloudReadiness\r\n fields = [\r\n 'interviewer','interviewees','comment'\r\n ]\r\n \r\n widgets = {\r\n 'interviewees' : Textarea(attrs={'rows':4}), \r\n 'comment' : Textarea(attrs={'rows':4}), \r\n }\r\n\r\nclass CloudReadinessAppForm(ModelForm):\r\n class Meta:\r\n model = models_cra.CloudReadinessApp\r\n fields = [\r\n 'app_name','missioncritical','migrationszenario',\r\n 
'server_count', 'interfaces_count', 'tier_count', 'config_level', 'packages_count', 'security_requirements', 'installation_support', \r\n 'installation_script', 'virtualization', 'testdata', \r\n 'firewall_rules', 'loadbalancer', 'external_interfaces'\r\n ]\r\n \r\n YESNO_CHOICES = ((True, _(\"Yes\")), (False, _(\"No\")))\r\n \r\n widgets = {\r\n 'installation_script': RadioSelect(),\r\n 'virtualization': RadioSelect(),\r\n 'testdata': RadioSelect(),\r\n 'firewall_rules': RadioSelect(),\r\n 'loadbalancer': RadioSelect(),\r\n 'external_interfaces': RadioSelect(),\r\n 'missioncritical': RadioSelect(choices=YESNO_CHOICES), \r\n 'missioncritical': RadioSelect(choices=YESNO_CHOICES), \r\n 'comment': Textarea(attrs={'rows':2}), \r\n }\r\n \r\nclass AssessmentForm(ModelForm):\r\n class Meta:\r\n model = models_ass.Assessment\r\n fields = [\r\n 'cluster','interviewer','interviewees','comment'\r\n ]\r\n \r\n widgets = {\r\n 'interviewees' : Textarea(attrs={'rows':4}), \r\n 'comment' : Textarea(attrs={'rows':4}), \r\n }\r\n \r\nclass AssessmentAppForm(ModelForm): \r\n class Meta:\r\n model = models_ass.AssessmentApp\r\n exclude = ('id', 'assessment', 'app_id', \r\n 'tk_distribution', 'tk_business_area', 'tk_business_unit', \r\n 'migration_lead', 'migration_coordinator', \r\n 'lupd_timestamp', 'lupd_user')\r\n\r\n YESNO_CHOICES = ((True, _(\"Yes\")), (False, _(\"No\")))\r\n widgets = {\r\n 'app_users': RadioSelect(),\r\n 'tk_user_spread': RadioSelect(),\r\n 'criticality_id': RadioSelect(),\r\n 'confidentiality_id': RadioSelect(),\r\n 'availability_id': RadioSelect(),\r\n 'integrity_id': RadioSelect(),\r\n 'app_interfaces_id': RadioSelect(),\r\n 'app_category': SelectWithPop(),\r\n 'location': SelectWithPop(),\r\n 'app_vendor': SelectWithPop(),\r\n 'authentication_type': SelectWithPop(),\r\n 'app_manager': SelectWithPop(),\r\n 'business_expert': SelectWithPop(),\r\n 'app_architect': SelectWithPop(),\r\n 'it_coordinator': SelectWithPop(),\r\n 'migration_manager': SelectWithPop(),\r\n 'migration_lead': SelectWithPop(),\r\n 'migration_coordinator': SelectWithPop(),\r\n 'technical_expert': SelectWithPop(),\r\n 'app_description': Textarea(attrs={'rows':2}),\r\n 'compliance': Textarea(attrs={'rows':2}),\r\n 'external_storage': Textarea(attrs={'rows':2}),\r\n 'service_accounts': Textarea(attrs={'rows':2}),\r\n 'migration_restrictions': Textarea(attrs={'rows':2}),\r\n 'backup_types': Textarea(attrs={'rows':2}),\r\n 'rto_requirements': Textarea(attrs={'rows':2}),\r\n 'rpo_requirements': Textarea(attrs={'rows':2}),\r\n 'special_hardware': Textarea(attrs={'rows':2}),\r\n 'infrastr_requirements': Textarea(attrs={'rows':2}),\r\n 'archiving_required': RadioSelect(choices=YESNO_CHOICES),\r\n 'migrate_backups': RadioSelect(choices=YESNO_CHOICES),\r\n 'migrate_archives': RadioSelect(choices=YESNO_CHOICES),\r\n 'can_virtual_server': RadioSelect(choices=YESNO_CHOICES),\r\n 'can_virtual_client': RadioSelect(choices=YESNO_CHOICES),\r\n 'can_shared_db_cluster': RadioSelect(choices=YESNO_CHOICES),\r\n }\r\n \r\nclass AssessmentAppSrvForm(ModelForm):\r\n class Meta:\r\n model = models_ass.Server\r\n exclude = ('id', 'application', 'lupd_timestamp', 'lupd_user')\r\n YESNO_CHOICES = ((True, _(\"Yes\")), (False, _(\"No\")))\r\n widgets = {\r\n 'location': SelectWithPop(),\r\n 'operating_system': SelectWithPop(),\r\n 'database_system': SelectWithPop(),\r\n 'platform': RadioSelect(choices=YESNO_CHOICES),\r\n 'web_conn_required': RadioSelect(choices=YESNO_CHOICES),\r\n 'lb_required': RadioSelect(choices=YESNO_CHOICES),\r\n 
'in_dmz': RadioSelect(choices=YESNO_CHOICES),\r\n 'archiving_required': RadioSelect(choices=YESNO_CHOICES),\r\n 'db_instances': Textarea(attrs={'rows':2}),\r\n 'db_service_accounts': Textarea(attrs={'rows':2}),\r\n 'software': Textarea(attrs={'rows':2}),\r\n 'server_interfaces': Textarea(attrs={'rows':2}),\r\n 'comment': Textarea(attrs={'rows':2})\r\n }\r\n \r\nclass AssessmentAppIntForm(ModelForm):\r\n class Meta:\r\n model = models_ass.Interface\r\n exclude = ('id', 'application', 'lupd_timestamp', 'lupd_user')\r\n widgets = {\r\n 'interface_type' : SelectWithPop(),\r\n 'description' : Textarea(attrs={'rows':2}),\r\n 'interface_from' : Textarea(attrs={'rows':2}),\r\n 'interface_to' : Textarea(attrs={'rows':2}),\r\n 'port_range' : Textarea(attrs={'rows':2}),\r\n 'comment' : Textarea(attrs={'rows':2}) \r\n }\r\n\r\nclass CategoryAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.AppCategory\r\n exclude = ('id',)\r\n\r\nclass LocationAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.Location\r\n exclude = ('id',)\r\n\r\nclass AppVendorAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.AppVendor\r\n exclude = ('id',)\r\n\r\nclass AuthenticationTypeAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.AuthenticationType\r\n exclude = ('id',)\r\n\r\nclass DatabaseSystemAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.DatabaseSystem\r\n exclude = ('id',)\r\n\r\nclass OperatingSystemAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.OperatingSystem\r\n exclude = ('id',)\r\n\r\nclass InterfaceTypeAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.InterfaceType\r\n exclude = ('id',)\r\n\r\nclass ContactAddRelatedForm(forms.ModelForm):\r\n class Meta:\r\n model = models_md.Contact\r\n fields = ['last_name', 'first_name', 'email_address', 'phone_number']\r\n\r\n#class AppCategoryAddForm(forms.ModelForm):\r\n# class Meta:\r\n# model = AppCategory\r\n# fields = ['category']\r\n#\r\n#class LocationAddForm(forms.ModelForm):\r\n# class Meta:\r\n# model = Location\r\n# fields = ['location']\r\n# \r\n#class AppVendorAddForm(forms.ModelForm):\r\n# class Meta:\r\n# model = AppVendor\r\n# fields = ['vendor']\r\n#\r\n#class AuthenticationTypeAddForm(forms.ModelForm):\r\n# class Meta:\r\n# model = AuthenticationType\r\n# fields = ['auth_type']\r\n# \r\n#class DatabaseSystemTypeAddForm(forms.ModelForm):\r\n# class Meta:\r\n# model = DatabaseSystem\r\n# fields = ['dbs']", "sub_path": "app_ac/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 11577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.forms.Select", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms.SelectMultiple", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 40, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 41, 
"usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 44, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 53, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 56, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 57, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 59, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 61, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 63, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 64, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 65, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 66, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 69, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 71, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 74, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 82, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 83, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 86, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 96, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 99, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 100, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 101, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 102, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 103, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 104, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 105, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 106, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 107, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 110, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 118, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 119, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 122, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 130, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 132, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 133, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 134, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 135, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 136, "usage_type": "call"}, {"api_name": 
"django.forms.RadioSelect", "line_number": 137, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 138, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 151, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 152, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 153, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 154, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 155, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 156, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 157, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 158, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 159, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 160, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 161, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 163, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 164, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 165, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 166, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 169, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 173, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 178, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 179, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 180, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 181, "usage_type": "call"}, {"api_name": "django.forms.RadioSelect", "line_number": 182, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 183, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 184, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 185, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 186, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 187, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 190, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 196, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 197, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 198, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 199, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 200, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 203, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 203, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 208, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 208, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 213, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 213, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 218, "usage_type": "attribute"}, {"api_name": "django.forms", 
"line_number": 218, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 223, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 223, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 228, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 228, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 233, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 233, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 238, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 238, "usage_type": "name"}]} +{"seq_id": "444001840", "text": "import matplotlib.pyplot as plt\nimport os\n\nif (__name__ == '__main__'):\n dirs = ['none.dir','flip.dir','allangle.dir']\n dirs_nobn = ['none_nobn.dir','flip_nobn.dir','allangle_nobn.dir']\n titles = ['no_rotation', 'with_flipping', 'with_all_angle']\n for k in [0,1,2]:\n dir1 = dirs[k]\n fid = open(os.path.join(dir1,'log.txt'),'r')\n lines = fid.readlines()\n x = []\n y1 = []\n y2 = []\n for line in lines:\n tmp = line.split(\" \")\n x.append(float(tmp[1]))\n y1.append(float(tmp[3]))\n y2.append(float(tmp[5]))\n plt.plot(x,y1,'bo-')\n plt.plot(x,y2,'ro-')\n\n dir2 = dirs_nobn[k]\n fid = open(os.path.join(dir2,'log.txt'),'r')\n lines = fid.readlines()\n x = []\n y1 = []\n y2 = []\n for line in lines:\n tmp = line.split(\" \") \n x.append(float(tmp[1]))\n y1.append(float(tmp[3]))\n y2.append(float(tmp[5]))\n plt.plot(x,y1,'gx:')\n plt.plot(x,y2,'kx:')\n\n plt.title(titles[k])\n plt.legend(['train','test','train nobn','test nobn'])\n plt.ylim([0,0.03])\n #plt.show()\n plt.savefig(titles[k]+'.pdf')\n plt.close()\n", "sub_path": "project3/stage2/compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 1250, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", 
"line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "555556615", "text": "import requests\nimport json\nimport numpy\nimport math\nimport time\nimport datetime\nfrom threading import Timer\n\nfrom config import API_TOKEN, METRICS_FILE, API_URI\nfrom models.metric import Metric\nfrom models.traceback import Traceback\nfrom models.project import Project\n\n# Retrieve from external API new logs\n\n\ndef getNewLogs():\n try:\n new_logs_request = requests.get(API_URI, headers={\"Authorization\": API_TOKEN})\n if new_logs_request.status_code != 200:\n return False\n return new_logs_request.json()\n except:\n return False\n\n# Returns local time, in a defined format\n\n\ndef getTime():\n return \"[\"+str(datetime.datetime.now()).split(\" \")[1].split(\".\")[0]+\"]\"\n\n\ndef main():\n # This array will store the duration of all requests\n req_duration_samples = []\n # Initializes a new Metric object\n metrics = Metric()\n # Loads old metrics, stored on file\n metrics.loadFromFile()\n # Retrieve new logs from external API\n print(getTime()+\" Downloading new logs from external API...\")\n logs = getNewLogs()\n print(getTime()+\" Done!\")\n if logs:\n for row in logs:\n if \"request_duration\" in row:\n req_duration_samples.append(row[\"request_duration\"])\n # Creates a new project in metrics object, if it's not already there\n metrics.newProject(row[\"project\"], row[\"timestamp\"], row[\"level\"])\n # If this row contains a traceback, adds it in metrics object\n if \"traceback\" in row:\n metrics.newTraceback(row[\"project\"], row[\"timestamp\"], row[\"traceback\"])\n # Updates mean and standard deviation\n metrics.updateMnD(req_duration_samples)\n # Store updates on file\n print(getTime()+\" Saving metrics...\")\n metrics.save()\n print(getTime()+\" Done!\")\n\nprint(getTime()+\" Initializing...\")\n# Runs the main method for the first time\nmain()\nwhile True:\n # Every 60 seconds, creates a new thread that runs the main method\n t = Timer(60.0, main)\n t.start()\n time.sleep(60)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "config.API_URI", "line_number": 19, "usage_type": "argument"}, {"api_name": "config.API_TOKEN", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.metric.Metric", "line_number": 37, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 65, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "132688284", "text": "from tortoise import Tortoise\nfrom tortoise.backends.asyncpg.schema_generator import AsyncpgSchemaGenerator\nfrom tortoise.backends.mysql.schema_generator import MySQLSchemaGenerator\nfrom tortoise.backends.sqlite.schema_generator import SqliteSchemaGenerator\nfrom tortoise.contrib import test\n\nfrom aerich.ddl.mysql import MysqlDDL\nfrom aerich.ddl.postgres import PostgresDDL\nfrom aerich.ddl.sqlite import SqliteDDL\nfrom tests.models import Category\n\n\nclass TestDDL(test.TruncationTestCase):\n maxDiff = None\n\n def setUp(self) -> None:\n client = Tortoise.get_connection(\"models\")\n if 
client.schema_generator is MySQLSchemaGenerator:\n self.ddl = MysqlDDL(client)\n elif client.schema_generator is SqliteSchemaGenerator:\n self.ddl = SqliteDDL(client)\n elif client.schema_generator is AsyncpgSchemaGenerator:\n self.ddl = PostgresDDL(client)\n\n def test_create_table(self):\n ret = self.ddl.create_table(Category)\n if isinstance(self.ddl, MysqlDDL):\n self.assertEqual(\n ret,\n \"\"\"CREATE TABLE IF NOT EXISTS `category` (\n `id` INT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n `slug` VARCHAR(200) NOT NULL,\n `name` VARCHAR(200) NOT NULL,\n `created_at` DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),\n `user_id` INT NOT NULL COMMENT 'User',\n CONSTRAINT `fk_category_user_e2e3874c` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE\n) CHARACTER SET utf8mb4;\"\"\",\n )\n elif isinstance(self.ddl, SqliteDDL):\n self.assertEqual(\n ret,\n \"\"\"CREATE TABLE IF NOT EXISTS \"category\" (\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n \"slug\" VARCHAR(200) NOT NULL,\n \"name\" VARCHAR(200) NOT NULL,\n \"created_at\" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"user_id\" INT NOT NULL REFERENCES \"user\" (\"id\") ON DELETE CASCADE /* User */\n);\"\"\",\n )\n elif isinstance(self.ddl, PostgresDDL):\n self.assertEqual(\n ret,\n \"\"\"CREATE TABLE IF NOT EXISTS \"category\" (\n \"id\" SERIAL NOT NULL PRIMARY KEY,\n \"slug\" VARCHAR(200) NOT NULL,\n \"name\" VARCHAR(200) NOT NULL,\n \"created_at\" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n \"user_id\" INT NOT NULL REFERENCES \"user\" (\"id\") ON DELETE CASCADE\n);\nCOMMENT ON COLUMN \"category\".\"user_id\" IS 'User';\"\"\",\n )\n\n def test_drop_table(self):\n ret = self.ddl.drop_table(Category)\n self.assertEqual(ret, \"DROP TABLE IF EXISTS category\")\n\n def test_add_column(self):\n ret = self.ddl.add_column(Category, Category._meta.fields_map.get(\"name\"))\n if isinstance(self.ddl, MysqlDDL):\n self.assertEqual(ret, \"ALTER TABLE category ADD `name` VARCHAR(200) NOT NULL\")\n elif isinstance(self.ddl, PostgresDDL):\n self.assertEqual(ret, 'ALTER TABLE category ADD \"name\" VARCHAR(200) NOT NULL')\n elif isinstance(self.ddl, SqliteDDL):\n self.assertEqual(ret, 'ALTER TABLE category ADD \"name\" VARCHAR(200) NOT NULL')\n\n def test_drop_column(self):\n ret = self.ddl.drop_column(Category, \"name\")\n self.assertEqual(ret, \"ALTER TABLE category DROP COLUMN name\")\n self.assertEqual(ret, \"ALTER TABLE category DROP COLUMN name\")\n\n def test_add_index(self):\n index = self.ddl.add_index(Category, [\"name\"])\n index_u = self.ddl.add_index(Category, [\"name\"], True)\n if isinstance(self.ddl, MysqlDDL):\n self.assertEqual(\n index, \"ALTER TABLE category ADD INDEX idx_category_name_8b0cb9 (`name`)\"\n )\n self.assertEqual(\n index_u, \"ALTER TABLE category ADD UNIQUE INDEX uid_category_name_8b0cb9 (`name`)\"\n )\n elif isinstance(self.ddl, SqliteDDL):\n self.assertEqual(\n index_u, 'ALTER TABLE category ADD UNIQUE INDEX uid_category_name_8b0cb9 (\"name\")'\n )\n self.assertEqual(\n index_u, 'ALTER TABLE category ADD UNIQUE INDEX uid_category_name_8b0cb9 (\"name\")'\n )\n\n def test_drop_index(self):\n ret = self.ddl.drop_index(Category, [\"name\"])\n self.assertEqual(ret, \"ALTER TABLE category DROP INDEX idx_category_name_8b0cb9\")\n ret = self.ddl.drop_index(Category, [\"name\"], True)\n self.assertEqual(ret, \"ALTER TABLE category DROP INDEX uid_category_name_8b0cb9\")\n\n def test_add_fk(self):\n ret = self.ddl.add_fk(Category, Category._meta.fields_map.get(\"user\"))\n self.assertEqual(\n ret,\n 
\"ALTER TABLE category ADD CONSTRAINT `fk_category_user_e2e3874c` FOREIGN KEY (`user_id`) REFERENCES `user` (`id`) ON DELETE CASCADE\",\n )\n\n def test_drop_fk(self):\n ret = self.ddl.drop_fk(Category, Category._meta.fields_map.get(\"user\"))\n self.assertEqual(ret, \"ALTER TABLE category DROP FOREIGN KEY fk_category_user_e2e3874c\")\n", "sub_path": "tests/test_ddl.py", "file_name": "test_ddl.py", "file_ext": "py", "file_size_in_byte": 4962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tortoise.contrib.test.TruncationTestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tortoise.contrib.test", "line_number": 13, "usage_type": "name"}, {"api_name": "tortoise.Tortoise.get_connection", "line_number": 17, "usage_type": "call"}, {"api_name": "tortoise.Tortoise", "line_number": 17, "usage_type": "name"}, {"api_name": "tortoise.backends.mysql.schema_generator.MySQLSchemaGenerator", "line_number": 18, "usage_type": "name"}, {"api_name": "aerich.ddl.mysql.MysqlDDL", "line_number": 19, "usage_type": "call"}, {"api_name": "tortoise.backends.sqlite.schema_generator.SqliteSchemaGenerator", "line_number": 20, "usage_type": "name"}, {"api_name": "aerich.ddl.sqlite.SqliteDDL", "line_number": 21, "usage_type": "call"}, {"api_name": "tortoise.backends.asyncpg.schema_generator.AsyncpgSchemaGenerator", "line_number": 22, "usage_type": "name"}, {"api_name": "aerich.ddl.postgres.PostgresDDL", "line_number": 23, "usage_type": "call"}, {"api_name": "tests.models.Category", "line_number": 26, "usage_type": "argument"}, {"api_name": "aerich.ddl.mysql.MysqlDDL", "line_number": 27, "usage_type": "argument"}, {"api_name": "aerich.ddl.sqlite.SqliteDDL", "line_number": 39, "usage_type": "argument"}, {"api_name": "aerich.ddl.postgres.PostgresDDL", "line_number": 50, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 64, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 68, "usage_type": "argument"}, {"api_name": "tests.models.Category._meta.fields_map.get", "line_number": 68, "usage_type": "call"}, {"api_name": "tests.models.Category._meta", "line_number": 68, "usage_type": "attribute"}, {"api_name": "aerich.ddl.mysql.MysqlDDL", "line_number": 69, "usage_type": "argument"}, {"api_name": "aerich.ddl.postgres.PostgresDDL", "line_number": 71, "usage_type": "argument"}, {"api_name": "aerich.ddl.sqlite.SqliteDDL", "line_number": 73, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 77, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 82, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 83, "usage_type": "argument"}, {"api_name": "aerich.ddl.mysql.MysqlDDL", "line_number": 84, "usage_type": "argument"}, {"api_name": "aerich.ddl.sqlite.SqliteDDL", "line_number": 91, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 100, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 102, "usage_type": "argument"}, {"api_name": "tests.models.Category", "line_number": 106, "usage_type": "argument"}, {"api_name": "tests.models.Category._meta.fields_map.get", "line_number": 106, "usage_type": "call"}, {"api_name": "tests.models.Category._meta", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tests.models.Category", "line_number": 113, "usage_type": "argument"}, {"api_name": "tests.models.Category._meta.fields_map.get", 
"line_number": 113, "usage_type": "call"}, {"api_name": "tests.models.Category._meta", "line_number": 113, "usage_type": "attribute"}]} +{"seq_id": "651872310", "text": "import preparation\nimport numpy as np\nimport os\nimport subprocess\nimport time\nimport rospy\nfrom sensor_msgs.msg import Image, CameraInfo\nfrom gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState, DeleteModel\nimport cv2\nimport tf\nimport geometry_msgs\nimport moveit_commander\nimport moveit_msgs.msg\nfrom moveit_msgs.msg import RobotTrajectory\nimport geometry_msgs.msg\nfrom actionlib_msgs.msg import GoalStatusArray\nimport sys\nimport threading\nimport math\nfrom std_msgs.msg import (UInt16, Float64)\nfrom sensor_msgs.msg import JointState\nfrom std_srvs.srv import Empty\nfrom gazebo_msgs.msg import LinkState, ModelStates, ModelState\nfrom geometry_msgs.msg import Point, Quaternion\nimport copy\nimport addObj\nfrom learning import *\n\n\n\nW = 640\nH = 480\n\nimage = None\ndepth_image = None\ncameraInfo = None\nstatus = None\nexecution = False\nix, iy = None, None\ncurrentHeight = None\nstate = None\nheight_map = None\n\naction, result, executing = None, None, False\n\ndef status_callback(data):\n global status\n if len(data.status_list) == 0: return\n status = data.status_list[-1].status\n # print(\" I am updating status: %d\" % status)\n\n\ndef image_callback(data):\n global image\n tmp = np.fromstring(data.data, np.uint8)\n image = np.reshape(tmp, (data.height, data.width, 3))[:, :, ::-1]\n\n\ndef depth_image_callback(data):\n global depth_image\n tmp = np.fromstring(data.data, np.float32)\n tmp = np.reshape(tmp, (data.height, data.width))\n # print(tmp)\n # tmp = np.reshape(tmp, (data.height, data.width, 4))\n # tmp = np.array(tmp, dtype=np.float)\n # tmp = tmp[:,:,0] + tmp[:,:,1] * 256 + tmp[:,:,2] * 256 ** 2 #+ tmp[:,:,3] * 256 ** 3\n # print(tmp)\n depth_image = tmp # / np.max(tmp)\n # print(depth_image.shape)\n\n\ndef P_callback(data):\n global cameraInfo\n cameraInfo = data.P\n\n\n\ndef jstates_callback(data):\n global state\n state = data\n\n\npreparation.init()\npreparation.spawn_camera()\n\n\nt = tf.TransformerROS(True, rospy.Duration(10.0))\npr = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)\nres = pr(\"camera::base_link\", \"world\")\ntr = res.link_state.pose.position\nro = res.link_state.pose.orientation\nmat = t.fromTranslationRotation((tr.x, tr.y, tr.z), (ro.x, ro.y, ro.z, ro.w))\n\nmoveit_commander.roscpp_initialize(sys.argv)\nrobot = moveit_commander.RobotCommander()\nscene = moveit_commander.PlanningSceneInterface()\ngroup = moveit_commander.MoveGroupCommander(\"Arm\")\n\nrospy.init_node('aNode')\n_sub_image = rospy.Subscriber('/qhd/image_color_rect', Image, image_callback)\n_sub_depth_image = rospy.Subscriber(\n '/qhd/image_depth_rect', Image, depth_image_callback)\n_sub_P = rospy.Subscriber('/qhd/camera_info', CameraInfo, P_callback)\n\n_pub_cmds = rospy.Publisher('/jaco/joint_control', JointState, queue_size=50)\n_sub_states = rospy.Subscriber(\n '/jaco/joint_state', JointState, jstates_callback)\n\n_sub_status = rospy.Subscriber('/jaco/joint_trajectory_action/status', GoalStatusArray, status_callback)\n\ndef clopen(p):\n sp = list(state.position)\n for i in range(6, 9):\n sp[i] = p\n s = state\n s.position = tuple(sp)\n s.velocity = tuple([0] * 9)\n _pub_cmds.publish(s)\n time.sleep(3)\n # for i in range(100):\n # print(p)\n\n\n# def move_and_descend(x, y, z1, z2):\n# global currentHeight, status\n# currentHeight = z2\n# print(\"============ Generating plan \")\n# 
cp = robot.get_link(group.get_end_effector_link()).pose()\n \n# # for i in range(10): print(\"pose\")\n# # print(group.get_current_pose().pose)\n\n# pose_target = cp\n \n# pose_target.pose.orientation.x = 0\n# pose_target.pose.orientation.y = -np.sqrt(0.5)\n# pose_target.pose.orientation.z = 0\n# pose_target.pose.orientation.w = np.sqrt(0.5)\n# pose_target.pose.position.x = x\n# pose_target.pose.position.y = y\n# pose_target.pose.position.z = z1\n\n# pose_target2 = copy.deepcopy(pose_target)\n# pose_target2.pose.position.z = z2\n# waypoints = [pose_target.pose, pose_target2.pose]\n\n# print(\"============ before plan \")\n# print(group.get_planning_frame())\n# (plan, fraction) = group.compute_cartesian_path(\n# waypoints, # waypoints to follow\n# 0.01, # eef_step\n# 0.0) # jump_threshold\n# print(\"============ after plan \")\n\n# print(\"============ Plan Generated...\")\n\n# print(plan)\n\n# if plan.joint_trajectory.points == []: \n# for i in range(10): print(\"no plan give up\")\n# return\n# status = None\n# group.go(wait=False)\n# # while status != \"This goal has been accepted by the simple action server\":\n# # print(\"waiting for acc\")\n# time.sleep(2)\n# start = time.time()\n# lastreport = time.time()\n# while status != 3:\n# if ((time.time() - lastreport) > 0.1):\n# print(status)\n# print(\"waiting for %lf, status: %d(3 means ok, 1 means doing)\" % (time.time() - start, status))\n# lastreport = time.time()\n# if time.time() - start > 10:\n# group.stop()\n# for i in range(10): print(\"timeout stop\")\n# return\n# for i in range(10): print(\"move success\")\n\n\ndef move_to(x, y, z, dir, timeout):\n group.set_planning_time(timeout)\n roll = np.pi / 4 * dir\n pitch = -np.pi / 2\n yaw = 0\n quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)\n global currentHeight, status\n currentHeight = z\n print(\"============ Generating plan \")\n cp = robot.get_link(group.get_end_effector_link()).pose()\n pose_target = cp\n pose_target.pose.orientation.x = quaternion[0]\n pose_target.pose.orientation.y = quaternion[1]\n pose_target.pose.orientation.z = quaternion[2]\n pose_target.pose.orientation.w = quaternion[3]\n pose_target.pose.position.x = x\n pose_target.pose.position.y = y\n pose_target.pose.position.z = z\n group.set_pose_target(pose_target)\n \n print(\"============ before plan \")\n\n plan1 = group.plan()\n print(\"============ after plan \")\n\n print(\"============ Plan Generated...\")\n if plan1 == None or plan1.joint_trajectory.points == []: \n for i in range(1): print(\"no plan give up\")\n return False\n status = None\n traj = plan1\n n_joints = len(traj.joint_trajectory.joint_names)\n n_points = len(traj.joint_trajectory.points)\n spd = 1.5\n for i in range(n_points):\n traj.joint_trajectory.points[i].time_from_start = traj.joint_trajectory.points[i].time_from_start / spd\n traj.joint_trajectory.points[i].velocities = tuple([traj.joint_trajectory.points[i].velocities[j] * spd for j in range(n_joints)])\n traj.joint_trajectory.points[i].accelerations = tuple([traj.joint_trajectory.points[i].accelerations[j] * spd for j in range(n_joints)])\n traj.joint_trajectory.points[i].positions = tuple([traj.joint_trajectory.points[i].positions[j] for j in range(n_joints)])\n print(plan1)\n group.execute(traj, wait=False)\n time.sleep(1)\n start = time.time()\n lastreport = time.time()\n while status != 3:\n if ((time.time() - lastreport) > 0.1):\n print(status)\n if not status is None: print(\"waiting for %lf, status: %d(3 means ok, 1 means doing)\" % (time.time() - 
start, status))\n lastreport = time.time()\n if time.time() - start > 10:\n group.stop()\n for i in range(1): print(\"timeout stop\")\n return False\n for i in range(1): print(\"move success\")\n return True\n \n\n\n\ndef click_callback(event, x, y, flags, param):\n global action\n\n if event == cv2.EVENT_LBUTTONDOWN:\n print(\"point1:=\", x, y)\n if not executing:\n action = x, y\n\n\ndef convertHeightMap():\n global height_map\n while True:\n if not depth_image is None:\n # print(depth_image.shape)\n pts = preparation.pointFromDepth(cameraInfo, mat, depth_image)\n\n # f = open(\"testPt%d.txt\" % np.random.randint(100), \"w\")\n # for i in range(H):\n # for j in range(W):\n # f.write(\"%lf %lf %lf\\n\" % (pts[i, j, 0], pts[i, j, 1], pts[i, j, 2]))\n # f.close()\n\n height_map = preparation.heightMapFromPoint(\n cameraInfo, np.linalg.inv(mat), pts, W, H, 0.73, 1.2)\n\n\nconvertThread = threading.Thread(target=convertHeightMap)\nconvertThread.setDaemon(True)\nconvertThread.start()\n\n\n\ncv2.namedWindow('x')\ncv2.namedWindow('y')\n\ncv2.setMouseCallback('x', click_callback)\n\n\ngetModelState = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\nsetModelState = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)\ndeleteModel = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)\n\n\ntime.sleep(5)\nclopen(-0.4)\nbackup_robot = state\n\nstates_backup = None\nmodels = {}\nfilenames = {}\n\ndef rearrange():\n global states_backup, models\n for model in models:\n deleteModel(model)\n # scene.remove_world_object(model)\n numObj = 1\n models = {}\n while True:\n modelname, filename = addObj.add()\n models[modelname] = 1\n filenames[modelname] = filename\n print(modelname)\n time.sleep(0.5)\n model_to_del = []\n for model in models:\n state0 = getModelState(model, \"world\")\n # if state0.pose.position.z < 0.5: // this\n # print(state0.pose.position.z)\n # model_to_del.append(model)\n for model in model_to_del:\n deleteModel(model)\n del models[model]\n if len(models) == numObj:\n break\n states_backup = {}\n time.sleep(1)\n ok = True\n for model in models:\n state0 = getModelState(model, \"world\")\n states_backup[model] = state0\n # if abs(state0.twist.angular.x) > 1 or abs(state0.twist.angular.y) > 1 or abs(state0.twist.angular.z) > 1: // this\n # ok = False\n # if state0.pose.position.z < 0.5:\n # ok = False\n if not ok:\n rearrange()\n # for model in models:\n # pose = geometry_msgs.msg.PoseStamped()\n # pose.header.frame_id = \"world\"\n # pose.pose = states_backup[model].pose\n # fname = filenames[model]\n # print(fname)\n # scene.add_mesh(model, pose, fname)\n # print(\"scene add mesh\")\n \n# # set still\n# for i in range(50):\n# for model in models:\n# state0 = getModelState(model, \"world\")\n# # print(state0)\n# state_msg = ModelState()\n# state_msg.model_name = model\n# state_msg.pose = state0.pose\n# setModelState(state_msg)\n# time.sleep(0.1)\n\nrearrange()\n\ndef reset():\n global states_backup\n sp = list(backup_robot.position)\n s = backup_robot\n s.position = tuple(sp)\n s.velocity = tuple([0] * 9)\n _pub_cmds.publish(s)\n time.sleep(1)\n for model in models:\n state0 = states_backup[model]\n # print(state0)\n state_msg = ModelState()\n state_msg.model_name = model\n state_msg.pose = state0.pose\n setModelState(state_msg)\n\n \n\ndef execution():\n\n \n global action, result, executing\n\n\n while True:\n if action != None:\n executing = True\n for i in range(10): print(\"start executing\")\n\n ix, iy, dir = action\n objHeight = 0.78\n for i in range(-10, 
10):\n for j in range(-10, 10):\n if iy + i >= 0 and ix + j >= 0 and iy + i < 480 and ix + j < 640:\n print(iy + i, ix + j)\n objHeight = max(objHeight, height_map[iy + i, ix + j])\n\n x_w, y_w, z_w = preparation.PointOnTable(ix, iy, cameraInfo, mat, 0.73)\n flag = 0\n\n for model in models:\n pose = geometry_msgs.msg.PoseStamped()\n pose.header.frame_id = \"world\"\n pose.pose = states_backup[model].pose\n fname = filenames[model]\n print(fname)\n scene.add_mesh(model, pose, fname)\n print(\"scene add mesh\")\n\n # if not move_to(x_w, y_w, 1.2, dir):\n # reset()\n # result = -1\n # for i in range(100): print(result)\n # action = None\n # executing = False\n # for model in models:\n # scene.remove_world_object(model)\n # continue\n\n height_plus = 0.05\n while not move_to(x_w, y_w, objHeight + height_plus, dir, 0.1):\n height_plus += 0.03\n if objHeight + height_plus > 1.2:\n result = flag\n for i in range(1): print(result)\n action = None\n executing = False\n for model in models:\n scene.remove_world_object(model)\n break\n if objHeight + height_plus > 1.2:\n continue\n \n for model in models:\n scene.remove_world_object(model)\n time.sleep(1)\n clopen(1)\n for i in range(1): print(\"closed\")\n\n\n\n\n # if not move_to(x_w, y_w, 1.2, dir):\n # reset()\n # result = flag\n # for i in range(100): print(result)\n # action = None\n # executing = False\n # continue\n\n x_w, y_w, z_w = preparation.PointOnTable(600, 230, cameraInfo, mat, 0.73)\n move_to(x_w, y_w, 1.3, 0, 1)\n\n \n for model in models:\n state = getModelState(model, \"world\")\n if state.pose.position.z > 0.8 and state.pose.position.x > 0.4:\n flag = 1\n\n reset()\n\n result = flag\n for i in range(1): print(result)\n action = None\n executing = False\n\nexecuteThread = threading.Thread(target=execution)\nexecuteThread.setDaemon(True)\nexecuteThread.start()\n\n\n# for i in range(100):\n# print(\"finish spawn\")\n\n\n\naction_filename = \"action_online_single.txt\"\nresult_filename = \"result_online_single.txt\"\nresults_prev = [line for line in open(result_filename, \"r\")]\n\nprint(results_prev)\nactions_prev = [line for line in open(action_filename, \"r\")][:len(results_prev)]\naction_file_new = open(action_filename, \"w\")\nresult_file_new = open(result_filename, \"w\")\nfor line in results_prev:\n result_file_new.write(line)\nfor line in actions_prev:\n action_file_new.write(line)\naction_file_new.close()\nresult_file_new.close()\n\n\niters_per_scene = 3\nitercnt = len(results_prev)\n\nsave_dir = \"/media/cscg/9b15ba08-9bee-40c1-ab08-870a5a5dd3ca/online_grasping_single\"\nif not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\nprev_height = None\nprev_action = None\nMary = agent()\nif os.path.exists('params_online_single.pkl'):\n Mary.net.load_state_dict(torch.load('params_online_single.pkl'))\n\nwhile True:\n # print(\"looping\")\n if not image is None:\n cv2.imshow(\"x\", image)\n # f = open(\"tes_map%d.txt\" % np.random.randint(100), \"w\")\n # for i in range(H):\n # for j in range(W):\n # f.write(\"%lf \" % height_map[i, j])\n # f.write(\"\\n\")\n # f.close()\n if not height_map is None:\n cv2.imshow(\"y\", height_map)\n # print(\"..\")\n key = cv2.waitKey(10)\n if key == 27: # wait for ESC\n subprocess.call(\"rosnode kill -a\", shell=True)\n preparation.clear()\n exit()\n # elif key == ord('q'):\n # clopen(1)\n # elif key == ord('p'):\n # moveThread = threading.Thread(target=move_to, args=(0.3, 0.15, 1.2))\n # moveThread.start()\n # elif key == ord('o'):\n # clopen(0)\n # elif key == ord('r'):\n # reset()\n if action 
== None:\n if result != None:\n\n if result != -1:\n Mary.backprop(prev_height, prev_action, result, \"\")\n torch.save(Mary.net.state_dict(), 'params_online_single.pkl')\n resfile = open(result_filename, \"a\")\n resfile.write(\"%d\\n\" % result)\n resfile.close()\n itercnt += 1\n if itercnt % iters_per_scene == 0:\n rearrange()\n prev_height = height_map\n action, iflearned = Mary.get_action(prev_height, 0.5)\n actionfile = open(action_filename, \"a\")\n actionfile.write(\"%d %d %d %d\\n\" % (action[0], action[1], action[2], iflearned))\n actionfile.close()\n np.save(os.path.join(save_dir, \"%d.npy\" % itercnt), prev_height)\n prev_action = action\n # cv2.imwrite(os.path.join(save_dir, \"%d.png\" % itercnt), height_map)\n \n", "sub_path": "online_grasping.py", "file_name": "online_grasping.py", "file_ext": "py", "file_size_in_byte": 16745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.fromstring", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 62, "usage_type": "call"}, {"api_name": "preparation.init", "line_number": 83, "usage_type": "call"}, {"api_name": "preparation.spawn_camera", "line_number": 84, "usage_type": "call"}, {"api_name": "tf.TransformerROS", "line_number": 87, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 87, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 88, "usage_type": "call"}, {"api_name": "gazebo_msgs.srv.GetLinkState", "line_number": 88, "usage_type": "argument"}, {"api_name": "moveit_commander.roscpp_initialize", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}, {"api_name": "moveit_commander.RobotCommander", "line_number": 95, "usage_type": "call"}, {"api_name": "moveit_commander.PlanningSceneInterface", "line_number": 96, "usage_type": "call"}, {"api_name": "moveit_commander.MoveGroupCommander", "line_number": 97, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 99, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 100, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 100, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 101, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 102, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 103, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.CameraInfo", "line_number": 103, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 105, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.JointState", "line_number": 105, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 106, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.JointState", "line_number": 107, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 109, "usage_type": "call"}, {"api_name": "actionlib_msgs.msg.GoalStatusArray", "line_number": 109, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 183, "usage_type": "attribute"}, {"api_name": 
"numpy.pi", "line_number": 184, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 186, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 186, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 222, "usage_type": "call"}, {"api_name": "time.time", "line_number": 223, "usage_type": "call"}, {"api_name": "time.time", "line_number": 224, "usage_type": "call"}, {"api_name": "time.time", "line_number": 226, "usage_type": "call"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 243, "usage_type": "attribute"}, {"api_name": "preparation.pointFromDepth", "line_number": 254, "usage_type": "call"}, {"api_name": "preparation.heightMapFromPoint", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 263, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 266, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 272, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 273, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 275, "usage_type": "call"}, {"api_name": "rospy.ServiceProxy", "line_number": 278, "usage_type": "call"}, {"api_name": "gazebo_msgs.srv.GetModelState", "line_number": 278, "usage_type": "argument"}, {"api_name": "rospy.ServiceProxy", "line_number": 279, "usage_type": "call"}, {"api_name": "gazebo_msgs.srv.SetModelState", "line_number": 279, "usage_type": "argument"}, {"api_name": "rospy.ServiceProxy", "line_number": 280, "usage_type": "call"}, {"api_name": "gazebo_msgs.srv.DeleteModel", "line_number": 280, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 283, "usage_type": "call"}, {"api_name": "addObj.add", "line_number": 299, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 303, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 316, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 356, "usage_type": "call"}, {"api_name": "gazebo_msgs.msg.ModelState", "line_number": 360, "usage_type": "call"}, {"api_name": "preparation.PointOnTable", "line_number": 386, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseStamped", "line_number": 390, "usage_type": "call"}, {"api_name": "geometry_msgs.msg", "line_number": 390, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 424, "usage_type": "call"}, {"api_name": "preparation.PointOnTable", "line_number": 439, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 455, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 485, "usage_type": "call"}, {"api_name": "os.path", "line_number": 485, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 486, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 491, "usage_type": "call"}, {"api_name": "os.path", "line_number": 491, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 497, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 505, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 507, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 509, "usage_type": "call"}, {"api_name": 
"preparation.clear", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 538, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 538, "usage_type": "call"}, {"api_name": "os.path", "line_number": 538, "usage_type": "attribute"}]} +{"seq_id": "640394996", "text": "#!/usr/bin/env python3\n#'description': ''The module is used to enumerate the following EC2 data from a set of regions on an AWS account: instances, security groups, elastic IP addresses, VPN customer gateways, dedicated hosts, network ACLs, NAT gateways, network interfaces, route tables, subnets, VPCs, and VPC endpoints. By default, all data will be enumerated, but if any arguments are passed in indicating what data to enumerate, only that specific data will be enumerated.',\n\nimport datetime\nimport argparse\nfrom copy import deepcopy\nfrom random import choice\nfrom botocore.exceptions import ClientError\nfrom core.secretfinder.utils import regex_checker, Color\nimport importlib\n\ntarget = ''\n\ntechnique_info = {\n 'blackbot_id': 'T1018',\n 'external_id': '',\n 'controller': 'ec2_enum_routetables',\n 'services': ['EC2'],\n 'prerequisite_modules': [],\n 'external_dependencies': [],\n 'arguments_to_autocomplete': ['--regions'],\n 'version': '1',\n 'aws_namespaces': [],\n 'last_updated_by': 'Blackbot, Inc. ' ,\n 'ttp_exec': '',\n 'ttp_mitigation': '',\n 'ttp_detection': '',\n 'name': 'Remote System Discovery',\n 'intent': 'Enumerates EC2 information.',\n\n}\n\nparser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])\n\nparser.add_argument('--regions', required=False, default=None, help='One or more (comma separated) AWS regions in the format \"us-east-1\". Defaults to all session regions.')\n\ndef main(args, awsattack_main):\n args = parser.parse_args(args)\n\n import_path = 'ttp.src.ec2_enum_routetables_src'\n src_code = __import__(import_path, globals(), locals(), ['technique_info'], 0)\n importlib.reload(src_code)\n\n awsattack_main.chain = True\n return src_code.main(args, awsattack_main)\n\ndef summary(data, awsattack_main):\n results = []\n\n results.append(' Regions:')\n for region in data['regions']:\n results.append(' {}'.format(region))\n\n results.append('')\n\n if 'Instances' in data:\n results.append(' {} total instance(s) found.'.format(len(data['Instances'])))\n\n if 'SecurityGroups' in data:\n results.append(' {} total security group(s) found.'.format(len(data['SecurityGroups'])))\n\n if 'ElasticIPs' in data:\n results.append(' {} total elastic IP address(es) found.'.format(len(data['ElasticIPs'])))\n\n if 'VPNCustomerGateways' in data:\n results.append(' {} total VPN customer gateway(s) found.'.format(len(data['VPNCustomerGateways'])))\n\n if 'DedicatedHosts' in data:\n results.append(' {} total dedicated hosts(s) found.'.format(len(data['DedicatedHosts'])))\n\n if 'NetworkACLs' in data:\n results.append(' {} total network ACL(s) found.'.format(len(data['NetworkACLs'])))\n\n if 'NATGateways' in data:\n results.append(' {} total NAT gateway(s) found.'.format(len(data['NATGateways'])))\n\n if 'NetworkInterfaces' in data:\n results.append(' {} total network interface(s) found.'.format(len(data['NetworkInterfaces'])))\n\n if 'RouteTables' in data:\n results.append(' {} total route table(s) found.'.format(len(data['RouteTables'])))\n\n if 'Subnets' in data:\n results.append(' {} total subnets(s) found.'.format(len(data['Subnets'])))\n\n if 'VPCs' in data:\n results.append(' {} total VPC(s) found.'.format(len(data['VPCs'])))\n\n if 
'VPCEndpoints' in data:\n results.append(' {} total VPC endpoint(s) found.'.format(len(data['VPCEndpoints'])))\n\n if 'LaunchTemplates' in data:\n results.append(' {} total launch template(s) found.'.format(len(data['LaunchTemplates'])))\n\n return '\\n'.join(results)\n", "sub_path": "ttp/ec2_enum_routetables.py", "file_name": "ec2_enum_routetables.py", "file_ext": "py", "file_size_in_byte": 3663, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "641064118", "text": "\nimport unittest\nfrom docker import Client\nfrom docker.utils import kwargs_from_env\nfrom subprocess import Popen, PIPE\nimport os\nimport requests\nfrom urlparse import urlparse\nimport json\n\nclass ContainerTest(unittest.TestCase):\n def setUp(self):\n cmd = \"docker-machine env dev\"\n p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n (out, err) = p.communicate()\n\n for property in out.split(\"\\n\"):\n if property.startswith(\"export\"):\n split_property = property[7:].split(\"=\")\n os.environ[split_property[0]] = split_property[1].strip(\"\\\"\")\n\n self.cli = Client(**kwargs_from_env(assert_hostname=False))\n\n def test_thereShouldBeASiteImageInTheHost(self):\n imagesMap = self.cli.images()\n self.assertEquals(\"chonku/ubuntu-micro-service:latest\", imagesMap[0][\"RepoTags\"][0])\n\n def test_shouldHaveARunningContainerInTheHost(self):\n containersMap = self.cli.containers()\n totalContainers = 0\n for container in containersMap:\n if (\"ubuntu-micro-service\" in container[\"Image\"]):\n totalContainers = totalContainers + 1\n self.assertTrue(\"Up\" in str(container[\"Status\"]))\n\n self.assertEquals(3, totalContainers)\n\n def test_AllDockerContainersAreServingTheApplication(self):\n o = urlparse(os.environ['DOCKER_HOST'])\n dev_docker_machine_host = o.hostname\n self.assertTrue(self.isResponse200('http://' + dev_docker_machine_host + ':8081/time'))\n self.assertTrue(self.isResponse200('http://' + dev_docker_machine_host + ':8082/time'))\n self.assertTrue(self.isResponse200('http://' + dev_docker_machine_host + ':8083/time'))\n\n def test_applicationHasHealthCheckEnabled(self):\n o = urlparse(os.environ['DOCKER_HOST'])\n dev_docker_machine_host = o.hostname\n self.assertTrue(self.isHealthy('http://' + dev_docker_machine_host + ':9091/healthcheck'))\n self.assertTrue(self.isHealthy('http://' + dev_docker_machine_host + ':9092/healthcheck'))\n self.assertTrue(self.isHealthy('http://' + dev_docker_machine_host + ':9093/healthcheck'))\n\n def isResponse200(self, url):\n response = requests.get(url)\n return 200 == response.status_code\n\n def isHealthy(self, url):\n response = requests.get(url)\n self.assertEquals(200, response.status_code)\n content = self.parseJson(response.content)\n return True == content['KBAppHC']['healthy']\n\n def executeCommandOnContainer(self, cmd):\n containersMap = self.cli.containers()\n execInstance = self.cli.exec_create(containersMap[0][\"Id\"], cmd)\n return self.cli.exec_start(execInstance).rstrip()\n\n def getCurrentDirectory(self):\n os.path.dirname(os.path.realpath(__file__))\n\n def parseJson(self, string):\n return json.loads(string)\n", "sub_path": "DockerEnvironments/MicroService/microServiceTests/ctest.py", "file_name": "ctest.py", "file_ext": "py", "file_size_in_byte": 2876, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 14, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "docker.Client", "line_number": 22, "usage_type": "call"}, {"api_name": "docker.utils.kwargs_from_env", "line_number": 22, "usage_type": "call"}, {"api_name": "urlparse.urlparse", "line_number": 39, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 39, "usage_type": "attribute"}, {"api_name": "urlparse.urlparse", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 68, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "627400146", "text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\nimport requests\n\nfrom .models import Tweet\n\n\ndef index(request, user_handle=None):\n \"\"\"\n Lists all tweets or a user's tweets if a user_handle is provided.\n\n Makes a request to the JSON API, stores the results or redirects if there's\n an error, and returns a list of all tweets as a rendered template.\n \"\"\"\n resp = requests.get(settings.TWEETS_URL)\n data = resp.json()\n\n if resp.status_code != 200 or 'error' in data:\n return HttpResponseRedirect(reverse('error'))\n\n for tweet in data:\n Tweet.objects.get_or_create_from_api_response(tweet)\n\n tweets = Tweet.objects.all()\n\n if user_handle:\n tweets = tweets.filter(user_handle__iexact=user_handle)\n\n ctx = {\n 'tweets': tweets,\n 'title': user_handle or 'Coke',\n }\n\n return render(request, 'index.html', ctx)\n", "sub_path": "app/apps/tweets/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.settings.TWEETS_URL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Tweet.objects.get_or_create_from_api_response", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Tweet.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Tweet.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Tweet", "line_number": 27, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "294704013", "text": "from pprint import pprint\n\nimport 
requests\n\nhost = 'https://dev.gpt-3.whatilearened.today'\n\n\n# show scene list\n\ndef get_scenes():\n scenes = requests.get(f\"{host}/scenes\").json()['results']\n for scen in scenes:\n print('='*10)\n print(scen['name'])\n print('-' * 10)\n print(scen['text'])\n print('=' * 10,'\\n')\n\n return scenes\n\n\ndef create_session(name: str, scene: str = 'qa'):\n data = {\n \"name\": name,\n \"scene\": scene,\n }\n resp = requests.post(f\"{host}/sessions\", json=data).json()\n session_id = resp['id']\n\n def send_msg(msg):\n msg = requests.post(f\"{host}/sessions/{session_id}/message\", json={'message': msg}).json()\n print(msg['text'])\n return msg\n\n def show_history():\n results = requests.get(f\"{host}/sessions/{session_id}\").json()['history']\n for h in results:\n print('-' * 10)\n print(f\"type: {h['history_by']}\")\n print(f\"{h['text']}\")\n print('-' * 10)\n\n return send_msg, show_history\n\n\ndef add_scene(name, text):\n data = {\n \"text\": text,\n \"name\": name,\n }\n try:\n resp = requests.post(f\"{host}/scenes\", json=data).json()\n print(f'add {name} scene')\n except Exception as e:\n print(e)\n\n\ndef delete_scene(name):\n try:\n result = requests.delete(f\"{host}/scenes/{name}\")\n print(result.text)\n except Exception as e:\n print(e)\n\n\ndef main():\n get_scenes()\n name = 'test1'\n send_msg, show_history = create_session(name)\n\n q = 'Q: Who is Apple CEO?'\n print('\\n\\n', q)\n send_msg(q)\n\n q = 'Q: Who is MicroSoft CEO?'\n send_msg(q)\n\n print(f'\\n\\n{\"#\" * 6} print all history {\"#\" * 6}')\n pprint(show_history())\n\n\ndef custom_scene():\n # add custom scene\n new_scene = 'Make-Flask-Code'\n new_scene_text = \"\"\"\n\nQ: make return hello world app\n\nCODE:\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\nQ: make request x,y and return x+y app \n\nCODE:\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/{x}/{y}')\ndef sum_num():\n return x+y\n\n\nQ: make get list of services app \n\nCODE:\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/services')\ndef list_services():\n services = ['a','b']\n return services\n\n\nQ: get list of people names app\n\nCODE:\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/peoples')\ndef list_names():\n names = ['sinsky','kendra']\n return names\n \n\n\"\"\"\n add_scene(new_scene, new_scene_text)\n get_scenes()\n name = 'test2'\n send_msg, show_history = create_session(name, scene=new_scene)\n show_history()\n\n q = 'Q: make return bye app\\n'\n print(q)\n send_msg(q)\n\n print('\\n\\nfinish request')\n delete_scene(new_scene)\n\n\nif __name__ == '__main__':\n main()\n custom_scene()\n", "sub_path": "sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 2878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 60, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "335396693", "text": "# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom __future__ import print_function\nfrom awscrt import NativeResource\nimport gc\nimport inspect\nimport sys\nimport time\nimport types\nimport unittest\n\nTIMEOUT = 10.0\n\n\nclass NativeResourceTest(unittest.TestCase):\n \"\"\"\n Test fixture asserts there are no living NativeResources when a test completes.\n \"\"\"\n\n def setUp(self):\n NativeResource._track_lifetime = True\n\n def tearDown(self):\n gc.collect()\n\n # Native resources might need a few more ticks to finish cleaning themselves up.\n wait_until = time.time() + TIMEOUT\n while NativeResource._living and time.time() < wait_until:\n time.sleep(0.1)\n\n # Print out debugging info on leaking resources\n if NativeResource._living:\n print('Leaking NativeResources:')\n for i in NativeResource._living:\n print('-', i)\n\n # getrefcount(i) returns 4+ here, but 2 of those are due to debugging.\n # Don't show:\n # - 1 for WeakSet iterator due to this for-loop.\n # - 1 for getrefcount(i)'s reference.\n # But do show:\n # - 1 for item's self-reference.\n # - the rest are what's causing this leak.\n refcount = sys.getrefcount(i) - 2\n\n # Gather list of referrers, but don't show those created by the act of iterating the WeakSet\n referrers = []\n for r in gc.get_referrers(i):\n if isinstance(r, types.FrameType):\n frameinfo = inspect.getframeinfo(r)\n our_fault = (frameinfo.filename.endswith('_weakrefset.py') or\n frameinfo.filename.endswith('test/__init__.py'))\n if our_fault:\n continue\n\n referrers.append(r)\n\n print(' sys.getrefcount():', refcount)\n print(' gc.referrers():', len(referrers))\n for r in referrers:\n if isinstance(r, types.FrameType):\n print(' -', inspect.getframeinfo(r))\n else:\n print(' -', r)\n\n self.assertEqual(0, len(NativeResource._living))\n", "sub_path": "test/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "unittest.TestCase", "line_number": 26, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource._track_lifetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource", "line_number": 32, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "awscrt.NativeResource._living", "line_number": 39, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource", "line_number": 39, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "awscrt.NativeResource._living", "line_number": 43, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource", "line_number": 43, "usage_type": "name"}, {"api_name": "awscrt.NativeResource._living", "line_number": 45, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource", "line_number": 45, "usage_type": 
"name"}, {"api_name": "sys.getrefcount", "line_number": 55, "usage_type": "call"}, {"api_name": "gc.get_referrers", "line_number": 59, "usage_type": "call"}, {"api_name": "types.FrameType", "line_number": 60, "usage_type": "attribute"}, {"api_name": "inspect.getframeinfo", "line_number": 61, "usage_type": "call"}, {"api_name": "types.FrameType", "line_number": 72, "usage_type": "attribute"}, {"api_name": "inspect.getframeinfo", "line_number": 73, "usage_type": "call"}, {"api_name": "awscrt.NativeResource._living", "line_number": 77, "usage_type": "attribute"}, {"api_name": "awscrt.NativeResource", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "611145923", "text": "\"\"\"Parser for VGG caffemodel.\"\"\"\n# Authors: Michael Eickenberg\n# Kyle Kastner\n# License: BSD 3 Clause\n\nfrom sklearn.externals import joblib\nfrom ...datasets import get_dataset_dir, download\nfrom .caffemodel import _parse_caffe_model, parse_caffe_model\nfrom ...utils import check_tensor, get_minibatch_indices\n\nfrom .googlenet_class_labels import get_googlenet_class_label\nfrom .googlenet_layer_names import get_googlenet_layer_names\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nimport os\nimport theano\nimport numpy as np\n\nVGG_PATH = get_dataset_dir(\"caffe/vgg\")\n\n\ndef fetch_vgg_protobuffer_file(caffemodel_file=None):\n \"\"\"Checks for existence of caffemodel protobuffer.\n Downloads it if it cannot be found.\"\"\"\n\n default_filename = os.path.join(VGG_PATH,\n \"VGG_ILSVRC_19_layers.caffemodel\")\n\n if caffemodel_file is not None:\n if os.path.exists(caffemodel_file):\n return caffemodel_file\n else:\n if os.path.exists(default_filename):\n import warnings\n warnings.warn('Did not find %s, but found and returned %s.' %\n (caffemodel_file, default_filename))\n return default_filename\n else:\n if os.path.exists(default_filename):\n return default_filename\n\n # We didn't find the file, let's download it. To the specified location\n # if specified, otherwise to the default place\n if caffemodel_file is None:\n caffemodel_file = default_filename\n if not os.path.exists(VGG_PATH):\n os.makedirs(VGG_PATH)\n\n url = \"http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/caffe/\"\n url += \"VGG_ILSVRC_19_layers.caffemodel\"\n download(url, caffemodel_file, progress_update_percentage=1)\n return caffemodel_file\n\n\ndef fetch_vgg_architecture(caffemodel_parsed=None, caffemodel_protobuffer=None):\n \"\"\"Fetch a pickled version of the caffe model, represented as list of\n dictionaries.\"\"\"\n\n default_filename = os.path.join(VGG_PATH, 'vgg.pickle')\n if caffemodel_parsed is not None:\n if os.path.exists(caffemodel_parsed):\n return joblib.load(caffemodel_parsed)\n else:\n if os.path.exists(default_filename):\n import warnings\n warnings.warn('Did not find %s, but found %s. Loading it.' 
%\n (caffemodel_parsed, default_filename))\n return joblib.load(default_filename)\n else:\n if os.path.exists(default_filename):\n return joblib.load(default_filename)\n\n # We didn't find the file: let's create it by parsing the protobuffer\n protobuf_file = fetch_vgg_protobuffer_file(caffemodel_protobuffer)\n model = _parse_caffe_model(protobuf_file)\n\n if caffemodel_parsed is not None:\n joblib.dump(model, caffemodel_parsed)\n else:\n joblib.dump(model, default_filename)\n\n return model\n\n\ndef create_theano_expressions(model=None, verbose=0):\n\n if model is None:\n model = fetch_vgg_architecture()\n\n layers, blobs, inputs, params = parse_caffe_model(\n model, convert_fc_to_conv=False, verbose=verbose)\n data_input = inputs['data']\n return blobs, data_input\n\n\ndef _get_fprop(output_layers=('prob',), model=None, verbose=0):\n\n if model is None:\n model = fetch_vgg_architecture(model)\n\n expressions, input_data = create_theano_expressions(model,\n verbose=verbose)\n to_compile = [expressions[expr] for expr in output_layers]\n\n return theano.function([input_data], to_compile)\n\n\n\n\nclass VGGClassifier(BaseEstimator):\n \"\"\"\n A classifier for cropped images using the VGG neural network.\n\n Parameters\n ----------\n top_n : integer, optional (default=5)\n How many classes to return, based on sorted class probabilities.\n\n output_strings : boolean, optional (default=True)\n Whether to return class strings or integer classes. Returns class\n strings by default.\n\n Attributes\n ----------\n crop_bounds_ : tuple, (x_left, x_right, y_lower, y_upper)\n The coordinate boundaries of the cropping box used.\n\n \"\"\"\n\n min_size = (224, 224)\n layer_names = get_googlenet_layer_names()\n\n def __init__(self, top_n=5, large_network=False, output_strings=True,\n transpose_order=(0, 3, 1, 2)):\n\n self.top_n = top_n\n self.large_network = large_network\n self.output_strings = output_strings\n self.transpose_order = transpose_order\n self.transform_function = _get_fprop()\n\n def fit(self, X, y=None):\n \"\"\"Passthrough for scikit-learn pipeline compatibility.\"\"\"\n return self\n\n def _predict_proba(self, X):\n x_midpoint = X.shape[2] // 2\n y_midpoint = X.shape[1] // 2\n\n x_lower_bound = x_midpoint - self.min_size[0] // 2\n if x_lower_bound <= 0:\n x_lower_bound = 0\n x_upper_bound = x_lower_bound + self.min_size[0]\n y_lower_bound = y_midpoint - self.min_size[1] // 2\n if y_lower_bound <= 0:\n y_lower_bound = 0\n y_upper_bound = y_lower_bound + self.min_size[1]\n self.crop_bounds_ = (x_lower_bound, x_upper_bound, y_lower_bound,\n y_upper_bound)\n\n res = self.transform_function(\n X[:, y_lower_bound:y_upper_bound,\n x_lower_bound:x_upper_bound, :].transpose(\n *self.transpose_order))[0]\n\n return res\n\n def predict(self, X):\n \"\"\"\n Classify a set of cropped input images.\n\n Returns the top_n classes.\n\n Parameters\n ----------\n X : array-like, shape = [n_images, height, width, color]\n or\n shape = [height, width, color]\n\n Returns\n -------\n T : array-like, shape = [n_images, top_n]\n\n Returns the top_n classes for each of the n_images in X.\n If output_strings is True, then the result will be string\n description of the class label.\n\n Otherwise, the returned values will be the integer class label.\n \"\"\"\n X = check_tensor(X, dtype=np.float32, n_dim=4)\n res = self._predict_proba(X)[0]\n indices=np.argsort(res)\n indices=indices[-self.top_n:]\n if self.output_strings:\n class_strings = np.empty_like(indices,\n dtype=object)\n for index, value in 
enumerate(indices.flat):\n class_strings.flat[index] = get_googlenet_class_label(value)\n return class_strings\n else:\n return indices\n\n def predict_proba(self, X):\n \"\"\"\n Prediction probability for a set of cropped input images.\n\n Returns the top_n probabilities.\n\n Parameters\n ----------\n X : array-like, shape = [n_images, height, width, color]\n or\n shape = [height, width, color]\n\n Returns\n -------\n T : array-like, shape = [n_images, top_n]\n\n Returns the top_n probabilities for each of the n_images in X.\n \"\"\"\n X = check_tensor(X, dtype=np.float32, n_dim=4)\n res = self._predict_proba(X)[:, :, 0, 0]\n return np.sort(res, axis=1)[:, -self.top_n:]\n", "sub_path": "sklearn_theano/feature_extraction/caffe/vgg.py", "file_name": "vgg.py", "file_ext": "py", "file_size_in_byte": 7377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "datasets.get_dataset_dir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "datasets.download", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 72, "usage_type": "name"}, {"api_name": "caffemodel._parse_caffe_model", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 79, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 81, "usage_type": "name"}, {"api_name": 
"caffemodel.parse_caffe_model", "line_number": 91, "usage_type": "call"}, {"api_name": "theano.function", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 111, "usage_type": "name"}, {"api_name": "googlenet_layer_names.get_googlenet_layer_names", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.check_tensor", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 196, "usage_type": "call"}, {"api_name": "googlenet_class_labels.get_googlenet_class_label", "line_number": 199, "usage_type": "call"}, {"api_name": "utils.check_tensor", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 222, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 224, "usage_type": "call"}]} +{"seq_id": "376036294", "text": "import numpy as np\nimport statsmodels.api as sm\nfrom statsmodels.stats.outliers_influence import summary_table\nimport scipy.stats as st\n\nfrom . import _utils\nfrom ._common_plot import CommonPlot\n\n\ndef _fit_reg(fit_reg, ci, ax, x, y, data, color, line_kws):\n if not fit_reg:\n return None\n if ci is None:\n ci = 0\n if ci < 0 or ci >= 100:\n raise ValueError('ci must be between 0 and 100 or `None`')\n\n if line_kws is None:\n line_kws = {}\n\n if 'lw' not in line_kws:\n line_kws['lw'] = 3\n\n X = data[x].values\n if len(X) == 1:\n return None\n idx_order = X.argsort()\n y = data[y].values\n if len(X) == 2:\n ax.plot(X, y, color=color, **line_kws)\n return None\n X = sm.add_constant(X)\n\n # if all x's are the same value, there can be no regression line\n if X.shape[1] == 1:\n return 1\n ols = sm.OLS(y, X).fit()\n pred_obj = ols.get_prediction()\n pred = pred_obj.predicted_mean[idx_order]\n try:\n ax.plot(X[idx_order, 1], pred, color=color, **line_kws)\n except IndexError:\n print(f\"col is {x}\")\n print(X.shape)\n print(data[x].values)\n print(X)\n\n if ci != 0:\n st, data, ss2 = summary_table(ols, alpha=1 - ci / 100)\n ax.fill_between(X[idx_order, 1], data[idx_order, 4], data[idx_order, 5],\n alpha=.3, color=color)\n\n\ndef jointplot(x, y, data=None, hue=None, row=None, col=None, kind='scatter', figsize=None,\n wrap=None, s=None, fit_reg=False, ci=95, rot=0, sharex=True, sharey=True, xlabel=None,\n ylabel=None, xlim=None, ylim=None, xscale='linear', yscale='linear', title=None,\n scatter_kws=None, line_kws=None):\n \"\"\"\n Creates a plot between the raw numeric variables `x` and `y`. No\n aggregation is performed. The default plot is a scatterplot. 
Use\n the parameter `kind` to create these other plots:\n * line\n * kde\n * bar\n\n Within a single plot, use `hue` to subdivide and color points/lines.\n Fit a regression line with confidence bands by setting `fit_reg`\n to `True`\n\n Parameters\n ----------\n x: str\n Column name of numeric variable for x-axis\n\n y: str\n Column name of numeric variable for y-axis\n\n data: Pandas or Dexplo DataFrame\n DataFrame whos column names may be used for x, y, hue, row, col, and s\n\n hue: str\n Column name of string/categorical variable who's unique values split\n are used to color points\n\n row: str\n Column name of string/categorical variable who's unique values\n split data into separate plots by row\n\n col: str\n Column name of string/categorical variable who's unique values\n split data int separate plots by column\n\n kind: str\n Kind of plot to be created. Either 'scatter', 'line', 'kde', 'bar'\n\n figsize: 2-item tuple of ints\n Determines figsize of figure. If left as `None`, the figsize will\n be automatically set based on the number of rows and columns\n\n wrap: int\n Used whenever exactly one of `row` or `col` is given. Starts a new\n row/column for every `wrap` plots\n\n s: int or str\n If `s` is an int, then all markers will be this size in points.\n If `s` is a str, then it corresponds to a numeric column in the\n DataFrame that contains the size of each point.\n\n fit_reg: bool\n When `True`, fit a regression line. By default it is False\n\n ci: int [0, 100)\n Confidence interval of regression line\n\n rot: int\n Long labels will be automatically wrapped, but you can still use\n this parameter to rotate x-tick labels. Only applied to strings.\n\n sharex: bool, 'row', or 'col'\n Determines whether the x-axis limits will be shared for each plot.\n Use False so that each plot has its own unique limits or 'row'/'col'\n for all rows/cols to share their limits. Default is True\n\n sharey: bool, 'row', or 'col'\n Determines whether the y-axis limits will be shared for each plot.\n Use False so that each plot has its own unique limits or 'row'/'col'\n for all rows/cols to share their limits. 
Default is Tru\n\n xlabel: str\n Label used for x-axis on figures with a single plot\n\n ylabel: str\n Label used for y-axis on figures with a single plot\n\n xlim: 2-item tuple of numerics\n Determines x-axis limits for figures with a single plot\n\n ylim: 2-item tuple of numerics\n Determines y-axis limits for figures with a single plot\n\n xscale: {'linear', 'log', 'symlog', 'logit'}\n Sets the scale of the x-axis.\n\n yscale: {'linear', 'log', 'symlog', 'logit'}\n Sets the scale of the y-axis\n\n title: str\n Sets the figure title NOT the Axes title\n\n scatter_kws: dict\n Extra keyword parameters passed to Matplotlib's Axes.scatter function\n\n line_kws: dict\n Extra keyword parameters passed to Matplotlib's Axes.plot function\n\n Returns\n -------\n A Matplotlib Axes when making a single plot or a one item tuple of a\n Matplotlib Figure when using `row` or `col`.\n \"\"\"\n\n return JointPlot(x, y, data, hue, row, col, kind, figsize, wrap, s, fit_reg, ci, rot, sharex,\n sharey, xlabel, ylabel, xlim, ylim, xscale, yscale, title, scatter_kws,\n line_kws).plot()\n\n\nclass JointPlot(CommonPlot):\n\n def __init__(self, x, y, data, hue, row, col, kind, figsize,\n wrap=None, s=None, fit_reg=False, ci=95, rot=0, sharex=True, sharey=True, xlabel=None,\n ylabel=None, xlim=None, ylim=None, xscale='linear', yscale='linear', title=None,\n scatter_kws=None, line_kws=None):\n self.validate_figsize(figsize)\n self.validate_data(data)\n\n param_dict = {'x': x, 'y': y, 'hue': hue, 'row': row, 'col': col, 's': s}\n self.validate_column_names(param_dict)\n\n self.validate_plot_args(wrap, kind)\n self.validate_mpl_args(rot, title, sharex, sharey, xlabel, ylabel,\n xlim, ylim, xscale, yscale)\n self.get_uniques()\n self.fit_reg = fit_reg\n self.ci = ci\n self.set_kws(scatter_kws, line_kws)\n self.single_plot = self.is_single_plot()\n self.plot_func = self.get_plotting_func()\n self.no_legend = True\n\n def validate_plot_args(self, wrap, kind):\n if wrap is not None:\n if not isinstance(wrap, int):\n raise TypeError('`wrap` must either be None or an integer. 
'\n f'You passed {type(wrap)}')\n\n if kind not in ('scatter', 'line', 'kde', 'bar'):\n raise ValueError(\"`kind` must be either 'scatter', 'line', 'kde', 'bar'\")\n\n self.wrap = wrap\n self.kind = kind\n\n def set_kws(self, scatter_kws, line_kws):\n if scatter_kws is None:\n self.scatter_kws = {}\n else:\n self.scatter_kws = scatter_kws\n\n if line_kws is None:\n self.line_kws = {}\n else:\n self.line_kws = line_kws\n\n def get_uniques(self):\n if self.hue:\n self.all_hues = np.sort(self.data[self.hue].unique())\n if self.row:\n self.all_rows = np.sort(self.data[self.row].unique())\n if self.col:\n self.all_cols = np.sort(self.data[self.col].unique())\n\n def get_plotting_func(self):\n if self.kind == 'scatter' and self.data[self.x].dtype.kind == 'M':\n return self.date_scatter\n return getattr(self, self.kind + 'plot')\n\n def apply_single_plot_changes(self, ax):\n if self.hue:\n ax.legend()\n\n ax.set_xlabel(self.x)\n ax.set_ylabel(self.y)\n\n if self.kind == 'kde' and self.orig_figsize is None:\n ax.figure.set_size_inches(8, 6)\n\n def apply_figure_changes(self, fig):\n if self.hue:\n handles, labels = fig.axes[0].get_legend_handles_labels()\n fig.legend(handles, labels, bbox_to_anchor=(1.01, .5), loc='center left')\n\n fig.text(.5, -.01, self.x, ha='center', va='center')\n fig.text(-.01, .5, self.y, ha='center', va='center', rotation=90)\n\n def plot(self):\n fig, axes = self.create_figure()\n if not (self.hue or self.row or self.col):\n ax = self.plot_only_xy(axes, self.data)\n elif self.hue and not (self.row or self.col):\n ax = self.plot_hue_xy(axes, self.data)\n elif bool(self.row) != bool(self.col):\n self.plot_row_or_col(axes)\n elif self.row and self.col:\n self.plot_row_and_col(axes)\n\n if self.single_plot:\n self.apply_single_plot_changes(ax)\n else:\n self.apply_figure_changes(fig)\n self.align_axes(axes)\n self.remove_yticklabels(axes)\n self.remove_xticklabels(axes)\n\n self.wrap_labels(fig)\n self.remove_ax(axes)\n fig.tight_layout()\n self.add_last_tick_labels(fig)\n\n if self.single_plot:\n return ax\n return fig,\n\n def plot_only_xy(self, ax, data):\n self.plot_func(ax, data)\n return ax\n\n def plot_hue_xy(self, ax, data):\n hue_map = _utils._map_val_to_color(self.all_hues)\n for val, sub_df in data.groupby(self.hue):\n self.plot_func(ax, sub_df, label=val, c=hue_map[val])\n return ax\n\n def plot_row_or_col(self, axes):\n split_by = self.row or self.col\n g = self.data.groupby(split_by)\n how = 'F' if self.row else 'C'\n axes_flat = axes.flatten(how)\n for i, (ax, (val, sub_df)) in enumerate(zip(axes_flat, g)):\n if not self.hue:\n self.plot_only_xy(ax, sub_df)\n else:\n self.plot_hue_xy(ax, sub_df)\n ax.set_title(val)\n\n def plot_row_and_col(self, axes):\n g = self.data.groupby([self.row, self.col])\n axes_flat = axes.flatten()\n groups = [(r, c) for r in self.all_rows for c in self.all_cols]\n\n for ax, group in zip(axes_flat, groups):\n ax.set_title(f'{group[0]} | {group[1]}')\n if group not in g.groups:\n continue\n else:\n sub_df = g.get_group(group)\n if not self.hue:\n self.plot_only_xy(ax, sub_df)\n else:\n self.plot_hue_xy(ax, sub_df)\n\n def scatterplot(self, ax, data, **kwargs):\n label = kwargs.get('label', '')\n c = kwargs.get('c', None)\n scat = ax.scatter(self.x, self.y, data=data, s=self.s,\n label=label, c=c, **self.scatter_kws)\n _fit_reg(self.fit_reg, self.ci, ax, self.x, self.y, data,\n scat.get_facecolor()[0], self.line_kws)\n return ax\n\n def date_scatter(self, ax, data, **kwargs):\n label = kwargs.get('label', '')\n c = kwargs.get('c', 
None)\n ax.plot_date(self.x, self.y, data=data, label=label, c=c, **self.scatter_kws)\n return ax\n\n def lineplot(self, ax, data, **kwargs):\n label = kwargs.get('label', '')\n ax.plot(self.x, self.y, data=data, label=label, **self.line_kws)\n return ax\n\n def kdeplot(self, ax, data, **kwargs):\n x, y = data[self.x].values, data[self.y].values\n xmin, xmax = x.min(), x.max()\n ymin, ymax = y.min(), y.max()\n\n # Peform the kernel density estimate\n xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]\n positions = np.vstack([xx.ravel(), yy.ravel()])\n values = np.vstack([x, y])\n kernel = st.gaussian_kde(values)\n f = np.reshape(kernel(positions).T, xx.shape)\n\n ax.contourf(xx, yy, f, cmap='Blues')\n return ax\n\n def barplot(self, ax, data, **kwargs):\n label = kwargs.get('label', '')\n ax.bar(self.x, self.y, data=data, label=label)\n return ax\n", "sub_path": "dexplot/_joint.py", "file_name": "_joint.py", "file_ext": "py", "file_size_in_byte": 11871, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "statsmodels.api.add_constant", "line_number": 32, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 32, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 37, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 37, "usage_type": "name"}, {"api_name": "scipy.stats", "line_number": 49, "usage_type": "name"}, {"api_name": "statsmodels.stats.outliers_influence.summary_table", "line_number": 49, "usage_type": "call"}, {"api_name": "_common_plot.CommonPlot", "line_number": 167, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 338, "usage_type": "call"}, {"api_name": "scipy.stats.gaussian_kde", "line_number": 339, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 339, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 340, "usage_type": "call"}]} +{"seq_id": "45288868", "text": "import jieba\r\nfrom numpy import *\r\nimport math\r\nimport Dir\r\nimport src.tools.FileTools as tools\r\nfrom src.ResultProcess import ResultPropress as RP\r\n\r\nclass ROUGE:\r\n\r\n def get_common_string(self,array,b,lhs,i,j):\r\n if i ==0 or j ==0:\r\n return\r\n if b[i][j] ==2:\r\n self.get_common_string(array,b,lhs,i-1,j-1)\r\n array.append(lhs[i-1])\r\n elif b[i][j] ==1:\r\n self.get_common_string(array,b,lhs,i-1,j)\r\n else:\r\n self.get_common_string(array,b,lhs,i,j-1)\r\n\r\n def get_max(self,matrix):\r\n max_value =0\r\n for i in range(matrix.__len__()):\r\n for j in range(matrix[i].__len__()):\r\n if matrix[i][j] > max_value:\r\n max_value = matrix[i][j]\r\n return int(max_value)\r\n\r\n # def __count_ngram(self,list1,list2):\r\n # commoncount=0\r\n # for word in list1:\r\n # if word in list2:\r\n # commoncount+=1\r\n # return commoncount\r\n\r\n def seperate_words(self,sentence,chinese = True):\r\n if chinese :\r\n return list(jieba.cut(sentence))\r\n else:\r\n return sentence.split(\" \")\r\n\r\n ### 输入 abstract: list [ sentence1, sentece2, ... 
, sentencen]\r\n def createNgram(self,abstract,n,chinese = True):\r\n # print(abstract)\r\n result ={}\r\n words,count = [],0\r\n for sentence in abstract:\r\n # print('sentence',sentence)\r\n words.extend(self.seperate_words(sentence,chinese))\r\n # print(n)\r\n for i in range(words.__len__()-n+1):\r\n ### [i+1,..,i+n-1]\r\n gram = words[i]\r\n for j in range(i+1,i+n):\r\n gram+=\"-\"+words[j]\r\n if gram not in result.keys():\r\n result[gram]=1\r\n else:\r\n result[gram]+=1\r\n count += 1\r\n result[\"allcount\"] = count\r\n # for key in result.keys():\r\n # print(key,result[key])\r\n return result\r\n\r\n def createSkipNgram(self, abstract, n, chinese=True,unigram = True):\r\n # print(abstract)\r\n result = {}\r\n words, count = [], 0\r\n for sentence in abstract:\r\n words.extend(self.seperate_words(sentence, chinese))\r\n if unigram:\r\n for word in words:\r\n if word not in result.keys():\r\n result[word]= 1\r\n else:\r\n result[word]+=1\r\n count+=1\r\n for i in range(words.__len__()):\r\n gram = words[i]\r\n for j in range(i+1, i + n+2):\r\n if j >= words.__len__():\r\n break\r\n tmp = gram+ \"-\" + words[j]\r\n # print(i,j)\r\n if tmp not in result.keys():\r\n result[tmp] = 1\r\n else:\r\n result[tmp] += 1\r\n count += 1\r\n result[\"allcount\"] = count\r\n # for key in result:\r\n # print(key,result[key])\r\n return result\r\n\r\n def ngramScore(self,model_grams,standard_grams):\r\n hit= 0\r\n for gram in standard_grams.keys():\r\n if gram!= \"allcount\":\r\n h = 0\r\n if gram in model_grams.keys():\r\n h = model_grams[gram] if model_grams[gram] <= standard_grams[gram] else standard_grams[gram]\r\n hit +=h\r\n if standard_grams[\"allcount\"] != 0:\r\n score = hit / standard_grams[\"allcount\"]\r\n else :\r\n score = 0\r\n return model_grams[\"allcount\"] , standard_grams[\"allcount\"], hit,score\r\n\r\n ## 输入的摘要:list[sentence1,sentence2,....]\r\n ## 输入N的值\r\n ## 输出 rouge-n的值\r\n def rouge_n(self,abstract,standard_abstract,n=1,chinese= True):\r\n # print(type(abstract))\r\n if isinstance(abstract,str):\r\n abstract = abstract.strip()\r\n abstract = abstract.split(\"\\n\")\r\n if isinstance(standard_abstract,str):\r\n standard_abstract = standard_abstract.strip()\r\n standard_abstract = standard_abstract.split(\"\\n\")\r\n\r\n model_grams = self.createNgram(abstract,n,chinese)\r\n standard_grams = self.createNgram(standard_abstract,n,chinese)\r\n result = self.ngramScore(model_grams,standard_grams)\r\n return result\r\n\r\n def rouge_skip(self,abstract,standard_abstract,n=4,chinese= True):\r\n model_grams = self.createSkipNgram(abstract, n, chinese)\r\n standard_grams = self.createSkipNgram(standard_abstract, n, chinese)\r\n result = self.ngramScore(model_grams, standard_grams)\r\n return result\r\n\r\n def rouge_l(self,abstract,standard_abstract,n=1,chinese= True):\r\n model_grams = self.createSkipNgram(abstract, n, chinese)\r\n standard_grams = self.createSkipNgram(standard_abstract, n, chinese)\r\n result = self.ngramScore(model_grams, standard_grams)\r\n return result\r\n\r\n def compute_result(self,result_list,model =\"A\",alpha = 0.95):\r\n gramScoreBest = 0\r\n totalhit, totalGramCountP, totalGramCount = 0, 0, 0\r\n for result in result_list:\r\n if model == \"A\":\r\n totalhit += result[2]\r\n totalGramCount += result[1]\r\n totalGramCountP += result[0]\r\n elif model == \"B\":\r\n if result[3] > gramScoreBest:\r\n gramScoreBest = result[3]\r\n totalhit = result[2]\r\n totalGramCount = result[1]\r\n totalGramCountP = result[0]\r\n else:\r\n totalhit += result[2]\r\n 
totalGramCount += result[1]\r\n totalGramCountP += result[0]\r\n gramScore, gramScoreP, gramScoref = 0, 0, 0\r\n if totalGramCount != 0:\r\n gramScore = totalhit / totalGramCount\r\n else:\r\n gramScore = 0\r\n if totalGramCountP != 0:\r\n gramScoreP = totalhit / totalGramCountP\r\n else:\r\n gramScoreP = 0\r\n if (1 - alpha) * gramScoreP + alpha * gramScore > 0:\r\n gramScoref = (gramScoreP * gramScore) / ((1 - alpha) * gramScoreP + alpha * gramScore)\r\n else:\r\n gramScoref = 0\r\n return totalhit, totalGramCount, totalGramCountP, gramScore, gramScoreP, gramScoref\r\n\r\n ### 生成摘要数量需和标准摘要数量一致\r\n ### model :A (平均得分),B(最好得分)\r\n ### func: rouge_n: ngram rouge_skip : skip ngarm rouge_l : lcs\r\n def compute_rouge(self,abstract_list,stand_abstract_list,n=1,model = \"A\",alpha = 0.5,chinese = True,funcT = rouge_n):\r\n result_list = []\r\n if abstract_list.__len__() != stand_abstract_list.__len__():\r\n return None\r\n else:\r\n for i in range(abstract_list.__len__()):\r\n result_list.append(funcT(self,abstract_list[i],stand_abstract_list[i],n=n,chinese = chinese))\r\n return self.compute_result(result_list,model,alpha)\r\n\r\n ## 输入的摘要:list[sentence1,sentence2,....]\r\n ## b = 准确率和召回率的因子\r\n ## 公式:F= (1+b^2)r*p/(r+b^2*p)\r\n ## 输出rouge-l的值\r\n def rouge_l(self,abstract,standard_abstract,b=-1):\r\n lcs,n,m,abstaabstact_count_complete =0,0,0,False\r\n for i in range(standard_abstract.__len__()):\r\n words=set()\r\n standard_abstract_sentence = standard_abstract[i]\r\n standard_abstract_words = self.seperate_words(standard_abstract_sentence)\r\n for j in range(abstract.__len__()):\r\n abstract_sentence = abstract[j]\r\n abstract_words = self.seperate_words(abstract_sentence)\r\n inter_words = self.longest_common_subsequence(abstract_words, standard_abstract_words)\r\n words = words | set(inter_words)\r\n if not abstaabstact_count_complete:\r\n n += abstract_words.__len__()\r\n abstaabstact_count_complete = True\r\n lcs += words.__len__()\r\n m += standard_abstract_words.__len__()\r\n # print(lcs,n,m)\r\n p = lcs / n\r\n r = lcs / m\r\n # print(p,r)\r\n f = (1 + b * b) * p * r / (r + b * b * p)\r\n if b == -1:\r\n f = r\r\n return lcs,n,f\r\n\r\n def de_func(self,w):\r\n return math.sqrt(w)\r\n\r\n ## ABANDED\r\n ## 输入的摘要:list[sentence1,sentence2,....]\r\n ## b = 准确率和召回率的因子\r\n ## 公式:F= (1+b^2)r*p/(r+b^2*p)\r\n ## 输出rouge-w的值\r\n # def rouge_w(self,abstract,standard_abstract,b =-1,function = func,de_funcrion = de_func()):\r\n # lcs, n, m, abstaabstact_count_complete = 0, 0, 0, False\r\n # for i in range(standard_abstract.__len__()):\r\n # words = set()\r\n # standard_abstract_sentence = standard_abstract[i]\r\n # standard_abstract_words = self.seperate_words(standard_abstract_sentence)\r\n # for j in range(abstract.__len__()):\r\n # abstract_sentence = abstract[j]\r\n # abstract_words = self.seperate_words(abstract_sentence)\r\n # words = words | set(self.longest_common_subsequence(abstract_words, standard_abstract_words))\r\n # if not abstaabstact_count_complete:\r\n # n += abstract_words.__len__()\r\n # abstaabstact_count_complete = True\r\n # lcs += function(words.__len__())\r\n # m += function(standard_abstract_words.__len__())\r\n # # print(lcs,n,m)\r\n # p = de_funcrion((lcs / n))\r\n # r = de_funcrion(lcs / m)\r\n # print(p, r)\r\n # f = (1 + b * b) * p * r / (r + b * b * p)\r\n # if b == -1:\r\n # f = r\r\n # return f\r\n\r\n\r\n ## 输入的摘要:list[sentence1,sentence2,....]\r\n ## b = 准确率和召回率的因子\r\n ## 公式:F= (1+b^2)r*p/(r+b^2*p)\r\n ## 输出rouge-w的值\r\n def 
rouge_s(self,abstract,standard_abstract,b =-1,max_skip =4,chinese= True):\r\n abstract_words,standard_abstract_words =[],{}\r\n for k in range(standard_abstract.__len__()):\r\n sentence = standard_abstract[k]\r\n words = rouge.seperate_words(sentence, chinese)\r\n standard_abstract_words[k]=[]\r\n for i in range(words.__len__()):\r\n tmp = \"\"\r\n for j in range(1, max_skip):\r\n if i + j < words.__len__():\r\n tmp = words[i] + \"_\" + words[i + j]\r\n standard_abstract_words[k].append(tmp)\r\n else:\r\n break\r\n\r\n for sentence in abstract:\r\n words = rouge.seperate_words(sentence,chinese)\r\n for i in range(words.__len__()):\r\n tmp = \"\"\r\n for j in range(1, max_skip):\r\n if i + j < words.__len__():\r\n tmp = words[i] + \"_\" + words[i + j]\r\n abstract_words.append(tmp)\r\n else:\r\n break\r\n\r\n count_match,reference_match =0,0\r\n for i in range(standard_abstract_words.__len__()):\r\n reference_match+=standard_abstract_words[i].__len__()\r\n for word in standard_abstract_words[i]:\r\n if word in abstract_words:\r\n count_match+=1\r\n # print(count_match,n)\r\n return count_match,reference_match,count_match/reference_match\r\n\r\n\r\n def demo_getlcs(self):\r\n rouge = ROUGE()\r\n lhs = [\"police\", \"killed\", \"ended\", \"the\", \"gunman\"]\r\n rhs = [\"police\", \"ended\", \"the\", \"gunman\"]\r\n result = rouge.longest_common_subsequence(lhs, rhs)\r\n print(result)\r\n\r\n def demo_rouge_n(self):\r\n rouge = ROUGE()\r\n # standard_abstract,abstract = [],[]\r\n # path_1 = Dir.resource + \"extradata\\\\test\\\\Guess_Summ_1.txt\"\r\n # path_2 = Dir.resource + \"extradata\\\\test\\\\Guess_Summ_2.txt\"\r\n # path_11 = Dir.resource + \"extradata\\\\test\\\\Ref_Summ_1_1.txt\"\r\n # path_21 = Dir.resource + \"extradata\\\\test\\\\Ref_Summ_2_1.txt\"\r\n # guess1 = tools.read_lines(path_1)\r\n # guess2 = tools.read_lines(path_2)\r\n # ref_1 = tools.read_lines(path_11)\r\n # ref_2 = tools.read_lines(path_21)\r\n standard_abstract = [[\"man kill police\",\"police man kill police\"]]\r\n abstract = [[\"police police man kill police\"]]\r\n result1 = rouge.compute_rouge(abstract, standard_abstract,n=1,chinese = False)\r\n print(result1)\r\n result2 = rouge.compute_rouge(abstract, standard_abstract, n=2,chinese = False)\r\n\r\n print(result2)\r\n\r\n def demo_rouge_l(self):\r\n standard_abstract = [\"w1 w2 w3 w4 w5\"]\r\n abstract =[\"w1 w2 w6 w7 w8\",\r\n \"w1 w3 w8 w9 w5 w0\"]\r\n resul = self.rouge_l(abstract,standard_abstract)\r\n print(resul)\r\n\r\n def demo_rouge_s(self):\r\n standard_abstract =[\"police killed the gunman\"]\r\n abstract = [\"police kill the gunman\"]\r\n abstract1 = [\"the gunman kill police\"]\r\n abstract2 = [\"the gunman police killed\"]\r\n\r\n result = self.rouge_n(abstract, standard_abstract, 1)\r\n print(\"n = 1 \", result)\r\n result = self.rouge_n(abstract1, standard_abstract, 1)\r\n print(\"n = 1 \", result)\r\n result = self.rouge_n(abstract2, standard_abstract, 1)\r\n print(\"n = 1 \", result)\r\n\r\n result = self.rouge_n(abstract, standard_abstract, 2)\r\n print(\"n = 2 \", result)\r\n result = self.rouge_n(abstract1, standard_abstract, 2)\r\n print(\"n = 2 \", result)\r\n result = self.rouge_n(abstract2, standard_abstract, 2)\r\n print(\"n = 2 \", result)\r\n #\r\n result = self.rouge_l(abstract, standard_abstract)\r\n print(\"rouge_l\", result)\r\n result = self.rouge_l(abstract1, standard_abstract)\r\n print(\"rouge_l\", result)\r\n result = self.rouge_l(abstract2, standard_abstract)\r\n print(\"rouge_l\", result)\r\n\r\n result = 
self.rouge_s(abstract, standard_abstract)\r\n print(\"rouge_s \", result)\r\n result = self.rouge_s(abstract1, standard_abstract)\r\n print(\"rouge_s \", result)\r\n result = self.rouge_s(abstract2, standard_abstract)\r\n print(\"rouge_s \", result)\r\n\r\n ### 输入: rouge_list = [ [rouge_recall, rouge_precision, rouge_f],...[rouge_recall, rouge_precision, rouge_f] ]\r\n ###\r\n def average(self,rouge_list,option=1):\r\n average_recall,average_precision,average_f =0,0,0\r\n for tmp in rouge_list:\r\n average_recall += tmp[0]\r\n average_precision += tmp[1]\r\n average_f += tmp[2]\r\n\r\n\r\n def eval(self,abstract_dir, standard_dir,n = [1,2]):\r\n guess_summary_list = RP.get_file_path(abstract_dir)\r\n ref_summ_list = RP.get_file_path_ref(standard_dir)\r\n # print(guess_summary_list)\r\n # print(ref_summ_list)\r\n assay_guess=[]\r\n assay_ref = []\r\n for i in range(guess_summary_list.__len__()):\r\n assay_guess.append(tools.read(guess_summary_list[i]))\r\n for i in range(ref_summ_list.__len__()):\r\n # for k in range(ref_summ_list)\r\n tmp = [tools.read(ref_summ_list[i][k]) for k in range(ref_summ_list[i].__len__())]\r\n assay_ref.append(tmp)\r\n # print(assay_ref.__len__())\r\n result =[]\r\n for r_n in n:\r\n recall_list =[]\r\n for i in range(assay_guess.__len__()):\r\n recall_list.append(sum([round(self.rouge_n(assay_guess[i],assay_ref[i][j],n= r_n,chinese=False)[-1],5) for j in range(assay_ref[i].__len__())]))\r\n value = sum(recall_list),recall_list.__len__(),sum(recall_list)/recall_list.__len__()\r\n result.append(value[-1])\r\n # print(sum(recall_list)/recall_list.__len__())\r\n return str(result)\r\n\r\n\r\n\r\n\r\nrouge = ROUGE()\r\nguess_ = []\r\nref_guess = []\r\n\r\n\r\n# rouge.demo_rouge_n()\r\n# rouge.demo_rouge_s()\r\n# s1 = list(\"abcbdab\")\r\n# s2 = list(\"bdcaba\")\r\n# rouge.longest_common_subsequence(s1,s2)\r\n\r\n# abstract = [\"police killed the gunman\"]\r\n# max_skip = 4\r\n# abstract_words =[]\r\n", "sub_path": "src/evaluation/ROUGE.py", "file_name": "ROUGE.py", "file_ext": "py", "file_size_in_byte": 16149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "jieba.cut", "line_number": 38, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 214, "usage_type": "call"}, {"api_name": "src.ResultProcess.ResultPropress.get_file_path", "line_number": 364, "usage_type": "call"}, {"api_name": "src.ResultProcess.ResultPropress", "line_number": 364, "usage_type": "name"}, {"api_name": "src.ResultProcess.ResultPropress.get_file_path_ref", "line_number": 365, "usage_type": "call"}, {"api_name": "src.ResultProcess.ResultPropress", "line_number": 365, "usage_type": "name"}, {"api_name": "src.tools.FileTools.read", "line_number": 371, "usage_type": "call"}, {"api_name": "src.tools.FileTools", "line_number": 371, "usage_type": "name"}, {"api_name": "src.tools.FileTools.read", "line_number": 374, "usage_type": "call"}, {"api_name": "src.tools.FileTools", "line_number": 374, "usage_type": "name"}]} +{"seq_id": "48604117", "text": "\"\"\"Validate a single address or multiple addresses.\"\"\"\nfrom typing import Any, Dict\n\nfrom ..jsonrpc import rpc_request\nfrom ..models.address import Address, AddressValidateResult\nfrom ..models.enums import RPCMethods\nfrom ..shipengine_config import ShipEngineConfig\nfrom ..util import does_normalized_address_have_errors\n\n\ndef validate(address: Address, config: ShipEngineConfig) -> AddressValidateResult:\n \"\"\"\n Validate a single address via the 
`address/validate` remote procedure.\n\n :param Address address: The address to be validate.\n :param ShipEngineConfig config: The global ShipEngine configuration object.\n :returns: :class:`AddressValidateResult`: The response from ShipEngine API including the\n validated and normalized address.\n \"\"\"\n api_response: Dict[str, Any] = rpc_request(\n method=RPCMethods.ADDRESS_VALIDATE.value,\n config=config,\n params={\"address\": address.to_dict()}, # type: ignore\n )\n result: Dict[str, Any] = api_response[\"result\"]\n return AddressValidateResult(\n is_valid=result[\"isValid\"],\n request_id=api_response[\"id\"],\n normalized_address=Address.from_dict(result[\"normalizedAddress\"])\n if \"normalizedAddress\" in result\n else None,\n messages=result[\"messages\"],\n )\n\n\ndef normalize(address: Address, config: ShipEngineConfig) -> Address:\n \"\"\"\n Normalize a given address into a standardized format.\n\n :param Address address: The address to be validate.\n :param ShipEngineConfig config: The global ShipEngine configuration object.\n :returns: :class:`Address`: The normalized address returned from ShipEngine API.\n \"\"\"\n validation_result: AddressValidateResult = validate(address=address, config=config)\n does_normalized_address_have_errors(result=validation_result)\n return validation_result.normalized_address\n", "sub_path": "shipengine_sdk/services/address_validation.py", "file_name": "address_validation.py", "file_ext": "py", "file_size_in_byte": 1863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "models.address.Address", "line_number": 11, "usage_type": "name"}, {"api_name": "shipengine_config.ShipEngineConfig", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "jsonrpc.rpc_request", "line_number": 20, "usage_type": "call"}, {"api_name": "models.enums.RPCMethods.ADDRESS_VALIDATE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.enums.RPCMethods", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "models.address.AddressValidateResult", "line_number": 26, "usage_type": "call"}, {"api_name": "models.address.Address.from_dict", "line_number": 29, "usage_type": "call"}, {"api_name": "models.address.Address", "line_number": 29, "usage_type": "name"}, {"api_name": "models.address.AddressValidateResult", "line_number": 11, "usage_type": "name"}, {"api_name": "models.address.Address", "line_number": 36, "usage_type": "name"}, {"api_name": "shipengine_config.ShipEngineConfig", "line_number": 36, "usage_type": "name"}, {"api_name": "models.address.AddressValidateResult", "line_number": 44, "usage_type": "name"}, {"api_name": "util.does_normalized_address_have_errors", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "163973025", "text": "import os\nimport asyncio\nimport pathlib\nimport multiprocessing\nimport synapse.exc as s_exc\nimport synapse.glob as s_glob\nimport synapse.common as s_common\n\nfrom unittest.mock import patch\n\nimport synapse.lib.base as s_base\nimport synapse.lib.const as s_const\nimport synapse.lib.lmdbslab as s_lmdbslab\nimport synapse.lib.thisplat as s_thisplat\n\nimport synapse.tests.utils as s_t_utils\nfrom synapse.tests.utils import alist\n\ndef 
getFileMapCount(filename):\n filename = str(filename)\n count = 0\n with open(f'/proc/{os.getpid()}/maps') as maps:\n for line in maps:\n if len(line) < 50:\n continue\n if line.rstrip().endswith(filename):\n count += 1\n return count\n\nclass LmdbSlabTest(s_t_utils.SynTest):\n def __init__(self, *args, **kwargs):\n self._nowtime = 1000\n s_t_utils.SynTest.__init__(self, *args, **kwargs)\n\n async def test_lmdbslab_scankeys(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path) as slab:\n\n testdb = slab.initdb('test')\n dupsdb = slab.initdb('dups', dupsort=True)\n editdb = slab.initdb('edit')\n\n self.eq((), list(slab.scanKeys(db=testdb)))\n self.eq((), list(slab.scanByDupsBack(b'asdf', db=dupsdb)))\n\n slab.put(b'hehe', b'haha', db=dupsdb)\n slab.put(b'hehe', b'lolz', db=dupsdb)\n slab.put(b'hoho', b'asdf', db=dupsdb)\n\n self.eq((), list(slab.scanByDupsBack(b'h\\x00', db=dupsdb)))\n\n slab.put(b'hehe', b'haha', db=testdb)\n slab.put(b'hoho', b'haha', db=testdb)\n\n testgenr = slab.scanKeys(db=testdb)\n dupsgenr = slab.scanKeys(db=dupsdb)\n\n testlist = [next(testgenr)]\n dupslist = [next(dupsgenr)]\n\n slab.put(b'derp', b'derp', db=editdb)\n\n # bump them both...\n await s_lmdbslab.Slab.syncLoopOnce()\n\n testlist.extend(testgenr)\n dupslist.extend(dupsgenr)\n\n self.eq(testlist, (b'hehe', b'hoho'))\n self.eq(dupslist, (b'hehe', b'hehe', b'hoho'))\n\n # now lets delete the key we're on\n testgenr = slab.scanKeys(db=testdb)\n dupsgenr = slab.scanKeys(db=testdb)\n\n testlist = [next(testgenr)]\n dupslist = [next(dupsgenr)]\n\n slab.delete(b'hehe', db=testdb)\n for lkey, lval in slab.scanByDups(b'hehe', db=dupsdb):\n slab.delete(lkey, lval, db=dupsdb)\n\n await s_lmdbslab.Slab.syncLoopOnce()\n\n testlist.extend(testgenr)\n dupslist.extend(dupsgenr)\n\n self.eq(testlist, (b'hehe', b'hoho'))\n self.eq(dupslist, (b'hehe', b'hoho'))\n\n # by pref\n self.eq([b'hoho'], list(slab.scanKeysByPref(b'h', db=dupsdb)))\n self.eq([], list(slab.scanKeysByPref(b'z', db=dupsdb)))\n\n async def test_lmdbslab_base(self):\n\n with self.getTestDir() as dirn0, self.getTestDir(startdir=dirn0) as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n await self.asyncraises(s_exc.BadArg, s_lmdbslab.Slab.anit(path, map_size=None))\n\n slab = await s_lmdbslab.Slab.anit(path, map_size=1000000, lockmemory=True)\n\n slabs = slab.getSlabsInDir(dirn)\n self.eq(slabs, [slab])\n\n slabs = slab.getSlabsInDir(pathlib.Path(dirn) / 'nowhere')\n self.len(0, slabs)\n\n slabs = slab.getSlabsInDir(pathlib.Path(dirn).parent)\n self.ge(1, len(slabs))\n\n foo = slab.initdb('foo')\n baz = slab.initdb('baz')\n bar = slab.initdb('bar', dupsort=True)\n empty = slab.initdb('empty')\n barfixed = slab.initdb('barfixed', dupsort=True, dupfixed=True)\n\n slab.put(b'\\x00\\x01', b'hehe', db=foo)\n slab.put(b'\\x00\\x02', b'haha', db=foo)\n slab.put(b'\\x01\\x03', b'hoho', db=foo)\n\n for db in (bar, barfixed):\n slab.put(b'\\x00\\x01', b'hehe', dupdata=True, db=db)\n slab.put(b'\\x00\\x02', b'haha', dupdata=True, db=db)\n slab.put(b'\\x00\\x02', b'visi', dupdata=True, db=db)\n slab.put(b'\\x00\\x02', b'zomg', dupdata=True, db=db)\n slab.put(b'\\x00\\x03', b'hoho', dupdata=True, db=db)\n\n slab.put(b'\\x00\\x01', b'hehe', db=baz)\n slab.put(b'\\xff', b'haha', db=baz)\n\n slab.put(b'\\xff\\xff', b'hoho', append=True, db=baz) # Should succeed\n slab.put(b'\\xaa\\xff', b'hoho', append=True, db=baz) # Should fail (not the last key)\n\n self.true(slab.dirty)\n\n 
self.true(slab.forcecommit())\n self.false(slab.dirty)\n\n self.eq(b'\\x00\\x01', slab.firstkey(db=foo))\n self.none(slab.firstkey(db=empty))\n self.eq(b'\\x01\\x03', slab.lastkey(db=foo))\n self.none(slab.lastkey(db=empty))\n\n self.eq(b'hehe', slab.get(b'\\x00\\x01', db=foo))\n\n items = list(slab.scanByPref(b'\\x00', db=foo))\n self.eq(items, ((b'\\x00\\x01', b'hehe'), (b'\\x00\\x02', b'haha')))\n\n items = list(slab.scanByRange(b'\\x00\\x02', b'\\x01\\x03', db=foo))\n self.eq(items, ((b'\\x00\\x02', b'haha'), (b'\\x01\\x03', b'hoho')))\n\n for db in (bar, barfixed):\n items = list(slab.scanByDups(b'\\x00\\x02', db=db))\n self.eq(items, ((b'\\x00\\x02', b'haha'), (b'\\x00\\x02', b'visi'), (b'\\x00\\x02', b'zomg')))\n\n items = list(slab.scanByDups(b'\\x00\\x04', db=db))\n self.eq(items, ())\n\n # Test scanByPref startkey, startvalu\n items = list(slab.scanByPref(b'\\x00', db=bar))\n alld = [(b'\\x00\\x01', b'hehe'),\n (b'\\x00\\x02', b'haha'),\n (b'\\x00\\x02', b'visi'),\n (b'\\x00\\x02', b'zomg'),\n (b'\\x00\\x03', b'hoho')]\n self.eq(alld, items)\n\n items = list(slab.scanByPref(b'\\x00', startkey=b'\\x02', db=bar))\n self.eq(alld[1:], items)\n\n items = list(slab.scanByPref(b'\\x00', startkey=b'\\x02', startvalu=b'vaaa', db=bar))\n self.eq(alld[2:], items)\n\n self.true(slab.prefexists(b'\\x00', db=baz))\n self.true(slab.prefexists(b'\\x00\\x01', db=baz))\n self.false(slab.prefexists(b'\\x00\\x03', db=baz))\n self.false(slab.prefexists(b'\\x02', db=baz))\n self.true(slab.prefexists(b'\\xff\\xff', db=baz))\n self.false(slab.prefexists(b'\\xff\\xff', db=foo))\n self.false(slab.prefexists(b'\\xaa\\xff', db=baz))\n\n self.true(slab.rangeexists(b'\\x00', b'\\x01', db=baz))\n self.true(slab.rangeexists(b'\\x00\\x00', b'\\x00\\x04', db=baz))\n self.false(slab.rangeexists(b'\\x00\\x04', b'\\x01', db=baz))\n self.true(slab.rangeexists(b'\\x05', None, db=baz))\n self.false(slab.rangeexists(b'\\xfa', b'\\xfc', db=baz))\n self.false(slab.rangeexists(b'\\x00\\x00', b'\\x00\\x00', db=foo))\n self.false(slab.rangeexists(b'\\x01\\x04', b'\\x01\\x05', db=foo))\n\n # backwards scan tests\n items = list(slab.scanByRangeBack(b'\\x00', db=foo))\n self.eq(items, ())\n\n items = list(slab.scanByPrefBack(b'\\x00', db=foo))\n self.eq(items, ((b'\\x00\\x02', b'haha'), (b'\\x00\\x01', b'hehe')))\n\n items = list(slab.scanByPrefBack(b'\\x01', db=foo))\n self.eq(items, ((b'\\x01\\x03', b'hoho'),))\n\n items = list(slab.scanByPrefBack(b'\\xff', db=baz))\n self.eq(items, ((b'\\xff\\xff', b'hoho'), (b'\\xff', b'haha')))\n\n items = list(slab.scanByRangeBack(b'\\x00\\x03', db=foo))\n self.eq(items, ((b'\\x00\\x02', b'haha'), (b'\\x00\\x01', b'hehe')))\n\n items = list(slab.scanByRangeBack(b'\\x00\\x03', b'\\x00\\x02', db=foo))\n self.eq(items, ((b'\\x00\\x02', b'haha'), ))\n\n items = list(slab.scanByRangeBack(b'\\x01\\x03', b'\\x00\\x02', db=foo))\n self.eq(items, ((b'\\x01\\x03', b'hoho'), (b'\\x00\\x02', b'haha')))\n\n items = list(slab.scanByRangeBack(b'\\x01\\x05', b'\\x00\\x02', db=foo))\n self.eq(items, ((b'\\x01\\x03', b'hoho'), (b'\\x00\\x02', b'haha')))\n\n for db in (bar, barfixed):\n items = list(slab.scanByDupsBack(b'\\x00\\x02', db=db))\n self.eq(items, ((b'\\x00\\x02', b'zomg'), (b'\\x00\\x02', b'visi'), (b'\\x00\\x02', b'haha')))\n\n items = list(slab.scanByDupsBack(b'\\x00\\x04', db=db))\n self.eq(items, ())\n\n with s_lmdbslab.ScanBack(slab, db=db) as scan:\n scan.first()\n self.eq(scan.atitem, (b'\\x00\\x03', b'hoho'))\n\n items = list(slab.scanByFullBack(db=foo))\n self.eq(items, ((b'\\x01\\x03', 
b'hoho'), (b'\\x00\\x02', b'haha'), (b'\\x00\\x01', b'hehe')))\n\n with s_lmdbslab.ScanBack(slab, db=foo) as scan:\n scan.set_key(b'\\x00\\x02')\n self.eq(scan.atitem, (b'\\x00\\x02', b'haha'))\n\n # test scans on emptydb\n\n emptydb = slab.initdb('empty')\n\n items = list(slab.scanByPrefBack(b'\\x00\\x01', db=emptydb))\n self.eq(items, ())\n\n items = list(slab.scanByPrefBack(b'\\xff\\xff', db=emptydb))\n self.eq(items, ())\n\n items = list(slab.scanByRangeBack(b'\\x00\\x01', db=emptydb))\n self.eq(items, ())\n\n items = list(slab.scanByFullBack(db=emptydb))\n self.eq(items, ())\n\n # ok... lets start a scan and then rip out the xact...\n scan = slab.scanByPref(b'\\x00', db=foo)\n self.eq((b'\\x00\\x01', b'hehe'), next(scan))\n\n slab.forcecommit()\n\n items = list(scan)\n self.eq(items, ((b'\\x00\\x02', b'haha'),))\n\n for db in (bar, barfixed):\n # to test iternext_dup, lets do the same with a dup scan\n scan = slab.scanByDups(b'\\x00\\x02', db=db)\n self.eq((b'\\x00\\x02', b'haha'), next(scan))\n\n slab.forcecommit()\n\n items = list(scan)\n self.eq(items, ((b'\\x00\\x02', b'visi'), (b'\\x00\\x02', b'zomg')))\n\n # do the same with backwards scanning\n scan = slab.scanByRangeBack(b'\\x01\\x03', db=foo)\n self.eq((b'\\x01\\x03', b'hoho'), next(scan))\n\n slab.forcecommit()\n\n items = list(scan)\n self.eq(items, ((b'\\x00\\x02', b'haha'), (b'\\x00\\x01', b'hehe')))\n\n # Copy a database inside the same slab\n self.raises(s_exc.DataAlreadyExists, slab.copydb, foo, slab, 'bar')\n self.eq(3, slab.copydb(foo, slab, 'foo2'))\n\n # Increase the size of the new source DB to trigger a resize on the next copydb\n foo2 = slab.initdb('foo2')\n slab.put(b'bigkey', b'x' * 1024 * 1024, dupdata=True, db=foo2)\n\n vardict = {}\n\n def progfunc(count):\n vardict['prog'] = count\n\n # Copy a database to a different slab\n path2 = os.path.join(dirn, 'test2.lmdb')\n async with await s_lmdbslab.Slab.anit(path2, map_size=512 * 1024) as slab2:\n with patch('synapse.lib.lmdbslab.PROGRESS_PERIOD', 2):\n\n self.eq(4, slab.copydb(foo2, slab2, destdbname='foo2', progresscb=progfunc))\n self.gt(vardict.get('prog', 0), 0)\n\n # Test slab.drop and slab.dbexists\n self.true(slab.dbexists('foo2'))\n slab.dropdb('foo2')\n self.false(slab.dbexists('foo2'))\n\n self.none(slab.dropdb('notadb'))\n\n # start a scan and then fini the whole db...\n scan = slab.scanByPref(b'\\x00', db=foo)\n self.eq((b'\\x00\\x01', b'hehe'), next(scan))\n\n scanback = slab.scanByPrefBack(b'\\x00', db=foo)\n self.eq((b'\\x00\\x02', b'haha'), next(scanback))\n\n await slab.fini()\n\n self.raises(s_exc.IsFini, next, scan)\n self.raises(s_exc.IsFini, next, scanback)\n\n slabs = s_lmdbslab.Slab.getSlabsInDir(dirn)\n self.len(0, slabs)\n\n # Ensure that our envar override for memory locking is acknowledged\n with self.setTstEnvars(SYN_LOCKMEM_DISABLE='1'):\n slab = await s_lmdbslab.Slab.anit(path, map_size=1000000, lockmemory=True)\n self.false(slab.lockmemory)\n self.none(slab.memlocktask)\n\n def simplenow(self):\n self._nowtime += 1000\n return self._nowtime\n\n async def test_lmdbslab_commit_warn(self):\n with self.getTestDir() as dirn, patch('synapse.lib.lmdbslab.Slab.WARN_COMMIT_TIME_MS', 1), \\\n patch('synapse.common.now', self.simplenow):\n path = os.path.join(dirn, 'test.lmdb')\n with self.getAsyncLoggerStream('synapse.lib.lmdbslab', 'Commit with') as stream:\n async with await s_lmdbslab.Slab.anit(path, map_size=100000) as slab:\n foo = slab.initdb('foo', dupsort=True)\n byts = b'\\x00' * 256\n for i in range(10):\n 
slab.put(b'\\xff\\xff\\xff\\xff' + s_common.guid(i).encode('utf8'), byts, db=foo)\n self.true(await stream.wait(timeout=1))\n\n async def test_lmdbslab_max_replay(self):\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n\n my_maxlen = 100\n\n # Make sure that we don't confuse the periodic commit with the max replay log commit\n\n with patch('synapse.lib.lmdbslab.Slab.COMMIT_PERIOD', 10):\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, max_replay_log=my_maxlen) as slab:\n foo = slab.initdb('foo', dupsort=True)\n byts = b'\\x00' * 256\n\n waiter = s_base.Waiter(slab, 1, 'commit')\n\n for i in range(150):\n slab.put(b'\\xff\\xff\\xff\\xff' + s_common.guid(i).encode('utf8'), byts, db=foo)\n\n self.true(slab.syncevnt.is_set())\n\n retn = await waiter.wait(timeout=1)\n self.nn(retn)\n self.len(1, retn)\n\n async def test_lmdbslab_maxsize(self):\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n\n my_maxsize = 400000\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, maxsize=my_maxsize) as slab:\n foo = slab.initdb('foo', dupsort=True)\n byts = b'\\x00' * 256\n\n # Trigger an out-of-space\n with self.raises(s_exc.DbOutOfSpace):\n\n for i in range(400):\n slab.put(b'\\xff\\xff\\xff\\xff' + s_common.guid(i).encode('utf8'), byts, db=foo)\n\n # lets ensure our maxsize persisted and it caps the mapsize\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, readonly=True) as newdb:\n self.eq(my_maxsize, newdb.mapsize)\n self.eq(my_maxsize, newdb.maxsize)\n\n async def test_lmdbslab_scanbump(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, growsize=10000) as slab:\n\n foo = slab.initdb('foo', dupsort=True)\n foo2 = slab.initdb('foo2', dupsort=False)\n bar = slab.initdb('bar', dupsort=True)\n\n multikey = b'\\xff\\xff\\xff\\xfe' + s_common.guid(2000).encode('utf8')\n\n byts = b'\\x00' * 256\n for i in range(10):\n slab.put(multikey, s_common.int64en(i), dupdata=True, db=foo)\n slab.put(s_common.int64en(i), byts, db=foo2)\n\n iter1 = slab.scanByDups(multikey, db=foo)\n iter2 = slab.scanByFull(db=foo2)\n\n for _ in range(6):\n next(iter1)\n next(iter2)\n\n iterback = slab.scanByDupsBack(multikey, db=foo)\n next(iterback)\n\n iterback2 = slab.scanByFullBack(db=foo2)\n next(iterback2)\n\n iterback3 = slab.scanByDupsBack(multikey, db=foo)\n iterback4 = slab.scanByFullBack(db=foo2)\n\n for _ in range(8):\n next(iterback3)\n next(iterback4)\n\n iterback5 = slab.scanByDupsBack(multikey, db=foo)\n next(iterback5)\n\n iterback6 = slab.scanByFullBack(db=foo2)\n next(iterback6)\n\n # Delete keys to cause set_range in iternext to fail\n for i in range(5):\n slab.delete(multikey, s_common.int64en(i + 5), db=foo)\n slab.delete(s_common.int64en(i + 5), db=foo2)\n\n slab.forcecommit()\n\n self.raises(StopIteration, next, iter1)\n self.raises(StopIteration, next, iter2)\n self.len(5, iterback)\n self.len(5, iterback2)\n\n # Delete all the keys in front of a backwards scan\n for i in range(4):\n slab.delete(multikey, s_common.int64en(i), db=foo)\n slab.delete(s_common.int64en(i), db=foo2)\n\n self.raises(StopIteration, next, iterback3)\n self.raises(StopIteration, next, iterback4)\n\n # Delete remaining keys so curs.last fails\n slab.delete(multikey, s_common.int64en(4), db=foo)\n slab.delete(s_common.int64en(4), db=foo2)\n\n self.raises(StopIteration, next, iterback5)\n self.raises(StopIteration, next, iterback6)\n\n slab.put(b'\\x00', 
b'asdf', dupdata=True, db=bar)\n slab.put(b'\\x01', b'qwer', dupdata=True, db=bar)\n iterback = slab.scanByRangeBack(b'\\x00', db=bar)\n self.eq((b'\\x00', b'asdf'), next(iterback))\n slab.delete(b'\\x00', b'asdf', db=bar)\n slab.forcecommit()\n self.raises(StopIteration, next, iterback)\n\n # range scan where we delete the entry we're on\n # and it's the only thing in the slab.\n iterrange = slab.scanByRange(b'\\x00', db=bar)\n self.eq((b'\\x01', b'qwer'), next(iterrange))\n slab.delete(b'\\x01', b'qwer', db=bar)\n slab.forcecommit()\n self.raises(StopIteration, next, iterrange)\n\n async def test_lmdbslab_scanbump2(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, growsize=10000) as slab:\n\n dupydb = slab.initdb('dup', dupsort=True)\n dupndb = slab.initdb('ndup', dupsort=False)\n\n for db in (dupndb, dupydb):\n slab.put(b'1', b'', db=db)\n slab.put(b'2', b'', db=db)\n slab.put(b'3', b'', db=db)\n\n # forwards, bump after 2nd entry\n it = slab.scanByFull(db=db)\n self.eq((b'1', b''), next(it))\n self.eq((b'2', b''), next(it))\n slab.forcecommit()\n self.eq((b'3', b''), next(it))\n self.raises(StopIteration, next, it)\n\n # backwards, bump after 2nd entry\n it = slab.scanByFullBack(db=db)\n self.eq((b'3', b''), next(it))\n self.eq((b'2', b''), next(it))\n slab.forcecommit()\n self.eq((b'1', b''), next(it))\n self.raises(StopIteration, next, it)\n\n # forwards, bump/delete after 2nd entry\n it = slab.scanByFull(db=db)\n self.eq((b'1', b''), next(it))\n self.eq((b'2', b''), next(it))\n slab.forcecommit()\n slab.delete(b'2', db=db)\n self.eq((b'3', b''), next(it))\n self.raises(StopIteration, next, it)\n\n it = slab.scanByFull(db=db)\n self.eq((b'1', b''), next(it))\n slab.forcecommit()\n slab.delete(b'3', db=db)\n self.raises(StopIteration, next, it)\n\n slab.put(b'2', b'', db=db)\n slab.put(b'3', b'', db=db)\n\n # backwards, bump/delete after 2nd entry\n it = slab.scanByFullBack(db=db)\n self.eq((b'3', b''), next(it))\n self.eq((b'2', b''), next(it))\n slab.forcecommit()\n slab.delete(b'2', db=db)\n self.eq((b'1', b''), next(it))\n self.raises(StopIteration, next, it)\n\n it = slab.scanByFullBack(db=db)\n slab.forcecommit()\n slab.delete(b'3', db=db)\n self.eq((b'1', b''), next(it))\n self.raises(StopIteration, next, it)\n\n slab.delete(b'1', db=dupydb)\n slab.delete(b'2', db=dupydb)\n slab.delete(b'3', db=dupydb)\n slab.put(b'0', b'', db=dupydb)\n slab.put(b'1', b'1', db=dupydb)\n slab.put(b'1', b'2', db=dupydb)\n slab.put(b'1', b'3', db=dupydb)\n slab.put(b'2', b'', db=dupydb)\n\n # dupsort=yes, forwards, same keys, bump after 2nd entry\n it = slab.scanByFull(db=dupydb)\n self.eq((b'0', b''), next(it))\n self.eq((b'1', b'1'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n self.eq((b'1', b'3'), next(it))\n self.eq((b'2', b''), next(it))\n self.raises(StopIteration, next, it)\n\n # forwards, bump/delete after 2nd entry\n it = slab.scanByFull(db=dupydb)\n self.eq((b'0', b''), next(it))\n self.eq((b'1', b'1'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'2', db=dupydb)\n self.eq((b'1', b'3'), next(it))\n self.eq((b'2', b''), next(it))\n self.raises(StopIteration, next, it)\n\n it = slab.scanByFull(db=dupydb)\n self.eq((b'0', b''), next(it))\n self.eq((b'1', b'1'), next(it))\n self.eq((b'1', b'3'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'3', db=dupydb)\n self.eq((b'2', b''), next(it))\n self.raises(StopIteration, next, it)\n\n slab.put(b'1', 
b'2', db=dupydb)\n slab.put(b'1', b'3', db=dupydb)\n\n # dupsort=yes, backwards, same keys, bump after 2nd entry\n it = slab.scanByFullBack(db=dupydb)\n self.eq((b'2', b''), next(it))\n self.eq((b'1', b'3'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n self.eq((b'1', b'1'), next(it))\n self.eq((b'0', b''), next(it))\n self.raises(StopIteration, next, it)\n\n # dupsort=yes, backwards, same keys, bump/delete after 2nd entry\n it = slab.scanByFullBack(db=dupydb)\n self.eq((b'2', b''), next(it))\n self.eq((b'1', b'3'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'2', db=dupndb)\n self.eq((b'1', b'1'), next(it))\n self.eq((b'0', b''), next(it))\n self.raises(StopIteration, next, it)\n\n slab.put(b'1', b'2', db=dupydb)\n slab.put(b'1', b'3', db=dupydb)\n\n # single key, forwards, bump after 2nd entry\n it = slab.scanByDups(db=dupydb, lkey=b'1')\n self.eq((b'1', b'1'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n self.eq((b'1', b'3'), next(it))\n self.raises(StopIteration, next, it)\n\n # single key, forwards, bump/delete after 2nd entry\n it = slab.scanByDups(db=dupydb, lkey=b'1')\n self.eq((b'1', b'1'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'2', db=dupydb)\n self.eq((b'1', b'3'), next(it))\n self.raises(StopIteration, next, it)\n\n it = slab.scanByDups(db=dupydb, lkey=b'1')\n self.eq((b'1', b'1'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'3', db=dupydb)\n self.raises(StopIteration, next, it)\n\n slab.put(b'1', b'2', db=dupydb)\n slab.put(b'1', b'3', db=dupydb)\n\n # dupsort=yes, backwards, same keys, bump after 2nd entry\n it = slab.scanByDupsBack(db=dupydb, lkey=b'1')\n self.eq((b'1', b'3'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n self.eq((b'1', b'1'), next(it))\n self.raises(StopIteration, next, it)\n\n # dupsort=yes, backwards, same keys, bump/delete after 2nd entry\n it = slab.scanByDupsBack(db=dupydb, lkey=b'1')\n self.eq((b'1', b'3'), next(it))\n self.eq((b'1', b'2'), next(it))\n slab.forcecommit()\n slab.delete(b'1', val=b'2', db=dupndb)\n self.eq((b'1', b'1'), next(it))\n self.raises(StopIteration, next, it)\n\n async def test_lmdbslab_count_empty(self):\n\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, growsize=10000) as slab:\n self.eq(0, await slab.countByPref(b'asdf'))\n\n async def test_lmdbslab_grow(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, growsize=10000) as slab:\n\n foo = slab.initdb('foo', dupsort=True)\n foo2 = slab.initdb('foo2', dupsort=False)\n\n byts = b'\\x00' * 256\n for i in range(100):\n slab.put(s_common.guid(i).encode('utf8'), byts, db=foo)\n slab.put(s_common.guid(1000 + i).encode('utf8'), byts, db=foo2)\n\n count = 0\n for _, _ in slab.scanByRange(b'', db=foo):\n count += 1\n self.eq(count, 100)\n\n count = 0\n for _, _ in slab.scanByRangeBack(b'ffffffffffffffffffffffffffffffff', db=foo):\n count += 1\n self.eq(count, 100)\n\n # Trigger a grow/bump in the middle of a scan; make sure new nodes come after current scan position\n iter = slab.scanByRange(b'', db=foo)\n for _ in range(50):\n next(iter)\n\n iterback = slab.scanByRangeBack(b'ffffffffffffffffffffffffffffffff', db=foo)\n for _ in range(50):\n next(iterback)\n\n multikey = b'\\xff\\xff\\xff\\xfe' + s_common.guid(2000).encode('utf8')\n mapsize = slab.mapsize\n count 
= 0\n\n # Write until we grow\n while mapsize == slab.mapsize:\n count += 1\n rv = slab.put(multikey, s_common.guid(count + 100000).encode('utf8') + byts, dupdata=True, db=foo)\n self.true(rv)\n\n self.eq(50 + count, sum(1 for _ in iter))\n self.eq(50, sum(1 for _ in iterback))\n\n self.true(os.path.isfile(slab.optspath))\n\n # Trigger a grow/bump in the middle of a dup scan\n iter = slab.scanByDups(multikey, db=foo)\n next(iter)\n\n iter2 = slab.scanByFull(db=foo2)\n next(iter2)\n\n iterback = slab.scanByDupsBack(multikey, db=foo)\n next(iterback)\n\n iterback2 = slab.scanByFullBack(db=foo2)\n next(iterback2)\n\n multikey = b'\\xff\\xff\\xff\\xff' + s_common.guid(i + 150000).encode('utf8')\n for i in range(200):\n slab.put(multikey, s_common.guid(i + 200000).encode('utf8') + byts, dupdata=True, db=foo)\n\n self.eq(count - 1, sum(1 for _ in iter))\n self.eq(99, sum(1 for _ in iter2))\n\n self.eq(count - 1, sum(1 for _ in iterback))\n self.eq(99, sum(1 for _ in iterback2))\n\n # lets ensure our mapsize / growsize persisted, and make sure readonly works\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, readonly=True) as newdb:\n\n self.eq(10000, newdb.growsize)\n foo = newdb.initdb('foo', dupsort=True)\n for _, _ in newdb.scanByRange(b'', db=foo):\n count += 1\n self.gt(count, 200)\n\n # Make sure readonly is really readonly\n self.raises(s_exc.IsReadOnly, newdb.dropdb, 'foo')\n self.raises(s_exc.IsReadOnly, newdb.put, b'1234', b'3456')\n self.raises(s_exc.IsReadOnly, newdb.replace, b'1234', b'3456')\n self.raises(s_exc.IsReadOnly, newdb.pop, b'1234')\n self.raises(s_exc.IsReadOnly, newdb.delete, b'1234')\n self.raises(s_exc.IsReadOnly, newdb.putmulti, ((b'1234', b'3456'),))\n\n # While we have the DB open in readonly, have another process write a bunch of data to cause the\n # map size to be increased\n\n ctx = multiprocessing.get_context('spawn')\n proc = ctx.Process(target=_writeproc, args=(path, ))\n proc.start()\n proc.join()\n\n # Now trigger a remap for me\n newdb.get(multikey, db=foo)\n\n async def test_lmdbslab_grow_putmulti(self):\n '''\n Test for a regression where putmulti's across a grow could corrupt the database\n\n Test for a regression where a generator being passed into a putmulti would result in a partial write\n '''\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n data = [i.to_bytes(4, 'little') for i in range(1000)]\n\n async with await s_lmdbslab.Slab.anit(path, map_size=10000) as slab:\n # A putmulti across a grow\n before_mapsize = slab.mapsize\n kvpairs = [(x, x) for x in data]\n retn = slab.putmulti(kvpairs)\n self.eq(retn, (1000, 1000))\n\n after_mapsize1 = slab.mapsize\n self.gt(after_mapsize1, before_mapsize)\n\n # A putmulti across a grow with a generator passed in\n kvpairs = ((b' ' + x, x) for x in data)\n retn = slab.putmulti(kvpairs)\n self.eq(retn, (1000, 1000))\n after_mapsize2 = slab.mapsize\n self.gt(after_mapsize2, after_mapsize1)\n\n async def test_lmdbslab_iternext_repeat_regression(self):\n '''\n Test for a scan being bumped in an iternext where the cursor is in the middle of a list of values with the same\n key\n '''\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n my_maxsize = 500000\n\n async with await s_lmdbslab.Slab.anit(path, map_size=100000, growsize=50000, maxsize=my_maxsize) as slab:\n foo = slab.initdb('foo', dupsort=True)\n\n key = b'foo'\n for i in range(100):\n slab.put(key, s_common.guid(i).encode('utf8'), db=foo)\n\n count = 0\n for _, _ in 
slab.scanByRange(b'', db=foo):\n count += 1\n self.eq(count, 100)\n\n # Partially read through scan\n iter = slab.scanByRange(lmin=key, lmax=key, db=foo)\n for _ in range(60):\n next(iter)\n\n # Trigger a bump by writing a bunch; make sure we're not writing into the middle of the scan\n multikey = b'\\xff\\xff\\xff\\xff' + s_common.guid(200).encode('utf8')\n mapsize = slab.mapsize\n count = 0\n while mapsize == slab.mapsize:\n count += 1\n slab.put(multikey, s_common.guid(count).encode('utf8') + b'0' * 256, dupdata=True, db=foo)\n\n # we wrote 100, read 60. We should read only another 40\n self.len(40, list(iter))\n\n async def test_slab_guid_stor(self):\n\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'slab.lmdb')\n async with await s_lmdbslab.Slab.anit(path) as slab:\n guidstor = s_lmdbslab.GuidStor(slab, 'guids')\n\n info0 = guidstor.gen('aaaa')\n info0.set('hehe', 20)\n self.eq(20, info0.get('hehe'))\n self.none(info0.get('haha'))\n\n info0.set('woot', {'woot': 1})\n self.eq((('hehe', 20), ('woot', {'woot': 1})), info0.items())\n\n self.eq({'woot': 1}, info0.get('woot'))\n self.eq({'woot': 1}, info0.pop('woot'))\n self.none(info0.get('woot'))\n self.none(info0.pop('woot'))\n self.true(info0.pop('woot', s_common.novalu) is s_common.novalu)\n\n # Sad path case\n self.raises(s_exc.NotMsgpackSafe, info0.set, 'newp', {1, 2, 3})\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n guidstor = s_lmdbslab.GuidStor(slab, 'guids')\n info1 = guidstor.gen('aaaa')\n self.eq(20, info1.get('hehe'))\n self.none(info1.pop('woot'))\n self.len(1, info1.items())\n self.eq((('hehe', 20), ), info1.items())\n\n async def test_slab_initdb_grow(self):\n self.thisHostMust(platform='linux')\n\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'slab.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=1024, lockmemory=True) as slab:\n self.true(await asyncio.wait_for(slab.lockdoneevent.wait(), 8))\n mapcount = getFileMapCount('slab.lmdb/data.mdb')\n self.eq(1, mapcount)\n\n mapsize = slab.mapsize\n [slab.initdb(str(i)) for i in range(10)]\n self.gt(slab.mapsize, mapsize)\n\n # Make sure there is still only one map\n self.true(await asyncio.wait_for(slab.lockdoneevent.wait(), 8))\n\n mapcount = getFileMapCount('slab.lmdb/data.mdb')\n self.eq(1, mapcount)\n\n def test_slab_math(self):\n self.eq(s_lmdbslab._mapsizeround(100), 128)\n self.eq(s_lmdbslab._mapsizeround(s_const.mebibyte), s_const.mebibyte)\n self.eq(s_lmdbslab._mapsizeround(s_const.mebibyte + 1), 2 * s_const.mebibyte)\n self.eq(s_lmdbslab._mapsizeround(65 * s_const.gibibyte), 100 * s_const.gibibyte)\n self.eq(s_lmdbslab._mapsizeround(472 * s_const.gibibyte), 500 * s_const.gibibyte)\n self.eq(s_lmdbslab._mapsizeround(1000 * s_const.gibibyte), 1000 * s_const.gibibyte)\n\n async def test_slab_infinite_loop(self):\n '''\n Trigger a map full when replaying the log from a prior map full.\n '''\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n byts = b'\\x00' * 256\n\n count = 0\n async with await s_lmdbslab.Slab.anit(path, map_size=32000, growsize=5000, lockmemory=True) as slab:\n foo = slab.initdb('foo')\n slab.put(b'abcd', s_common.guid(count).encode('utf8') + byts, db=foo)\n await asyncio.sleep(1.1)\n count += 1\n slab.put(b'abcd', s_common.guid(count).encode('utf8') + byts, db=foo)\n\n # If we got here we're good\n self.true(True)\n\n async def test_slab_mapfull_runsyncloop(self):\n '''\n forcecommit in runSyncLoop can very occasionally trigger a mapfull\n '''\n with 
patch('synapse.lib.lmdbslab.Slab.DEFAULT_MAPSIZE', s_const.mebibyte), \\\n patch('synapse.lib.lmdbslab.Slab.DEFAULT_GROWSIZE', 128 * s_const.kibibyte):\n batchsize = 4000\n numbatches = 2\n async with self.getTestCore() as core:\n before_mapsize = core.view.layers[0].layrslab.mapsize\n for i in range(numbatches):\n async with await core.snap() as snap:\n ips = ((('test:int', i * 1000000 + x), {'props': {'loc': 'us'}}) for x in range(batchsize))\n await alist(snap.addNodes(ips))\n # Wait for the syncloop to run\n await asyncio.sleep(1.1)\n\n # Verify that it hit\n self.gt(core.view.layers[0].layrslab.mapsize, before_mapsize)\n\n async def test_slab_mapfull_drop(self):\n '''\n Test a mapfull in the middle of a dropdb\n '''\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n data = [i.to_bytes(4, 'little') for i in range(400)]\n\n async with await s_lmdbslab.Slab.anit(path, map_size=32000, growsize=5000) as slab:\n slab.initdb('foo')\n kvpairs = [(x, x) for x in data]\n slab.putmulti(kvpairs)\n slab.forcecommit()\n before_mapsize = slab.mapsize\n slab.dropdb('foo')\n self.false(slab.dbexists('foo'))\n self.gt(slab.mapsize, before_mapsize)\n\n @staticmethod\n def make_slab(path):\n '''\n Multiprocessing target for expanding an existing slab\n '''\n async def workloop():\n s_glob.iAmLoop()\n data = [i.to_bytes(4, 'little') for i in range(400)]\n async with await s_lmdbslab.Slab.anit(path, map_size=32000, growsize=5000) as slab:\n slab.initdb('foo')\n kvpairs = [(x, x) for x in data]\n slab.putmulti(kvpairs)\n slab.forcecommit()\n\n asyncio.run(workloop())\n\n async def test_slab_mapfull_initdb(self):\n '''\n Test a mapfull in the middle of an initdb\n '''\n mpctx = multiprocessing.get_context('spawn')\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=32000) as slab:\n pass\n async with await s_lmdbslab.Slab.anit(path, map_size=32000, readonly=True) as slab:\n\n proc = mpctx.Process(target=self.make_slab, args=(path,))\n proc.start()\n proc.join(10)\n self.nn(proc.exitcode)\n slab.initdb('foo')\n self.true(True)\n\n async def test_lmdb_multiqueue(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n\n mque = await slab.getMultiQueue('test')\n\n self.false(mque.exists('woot'))\n\n with self.raises(s_exc.NoSuchName):\n await mque.rem('woot')\n\n with self.raises(s_exc.NoSuchName):\n await mque.get('woot', 0)\n\n with self.raises(s_exc.NoSuchName):\n await mque.put('woot', 'lulz')\n\n with self.raises(s_exc.NoSuchName):\n mque.status('woot')\n\n with self.raises(s_exc.NoSuchName):\n await mque.cull('woot', -1)\n\n with self.raises(s_exc.NoSuchName):\n await mque.dele('woot', 1, 1)\n\n with self.raises(s_exc.NoSuchName):\n await mque.sets('woot', 1, ('lols',))\n\n await mque.add('woot', {'some': 'info'})\n await self.asyncraises(s_exc.DupName, mque.add('woot', {}))\n\n self.true(mque.exists('woot'))\n\n self.eq(0, await mque.put('woot', 'hehe'))\n self.eq(1, await mque.put('woot', 'haha'))\n self.eq(2, await mque.put('woot', 'hoho'))\n\n self.eq(3, mque.size('woot'))\n\n self.eq(3, await mque.put('woot', 'lol', reqid='foo'))\n self.eq(4, await mque.put('woot', 'lol', reqid='foo'))\n self.eq(4, await mque.put('woot', 'lol', reqid='foo'))\n\n self.eq(4, await mque.puts('woot', ('lol2', 'lol3'), reqid='foo2'))\n self.eq(6, await mque.puts('woot', ('lol2', 'lol3'), reqid='foo2'))\n self.eq(6, await 
mque.puts('woot', ('lol2', 'lol3'), reqid='foo2'))\n\n self.eq((0, 'hehe'), await mque.get('woot', 0))\n self.eq((1, 'haha'), await mque.get('woot', 1))\n self.eq((1, 'haha'), await mque.get('woot', 0))\n\n self.eq((-1, None), await mque.get('woot', 1000, cull=False))\n\n self.eq(5, mque.size('woot'))\n\n status = mque.list()\n self.len(1, status)\n self.eq(status[0], {'name': 'woot',\n 'meta': {'some': 'info'},\n 'size': 5,\n 'offs': 6,\n })\n\n await mque.cull('woot', -1)\n self.eq(mque.status('woot'), status[0])\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n\n mque = await slab.getMultiQueue('test')\n\n self.eq(5, mque.size('woot'))\n self.eq(6, mque.offset('woot'))\n\n self.eq(((1, 'haha'), ), [x async for x in mque.gets('woot', 0, size=1)])\n\n correct = ((1, 'haha'), (2, 'hoho'), (3, 'lol'), (4, 'lol2'), (5, 'lol3'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n data = []\n evnt = asyncio.Event()\n\n async def getswait():\n async for item in mque.gets('woot', 0, wait=True):\n\n if item[1] is None:\n break\n\n data.append(item)\n\n if item[1] == 'hoho':\n evnt.set()\n\n task = slab.schedCoro(getswait())\n\n await asyncio.wait_for(evnt.wait(), 5)\n\n self.eq(data, correct)\n\n await mque.put('woot', 'lulz')\n await mque.put('woot', None)\n\n await asyncio.wait_for(task, 2)\n\n self.eq(data, (*correct, (6, 'lulz')))\n\n self.true(mque.exists('woot'))\n\n self.eq((2, 'hoho'), await mque.get('woot', 2))\n\n await mque.put('woot', 'huhu')\n\n await mque.rem('woot')\n\n self.false(mque.exists('woot'))\n\n await mque.add('woot', {'some': 'info'})\n self.eq(0, await mque.put('woot', 'hehe'))\n self.eq(1, await mque.put('woot', 'haha'))\n self.eq(2, await mque.put('woot', 'hoho'))\n\n self.eq(3, mque.size('woot'))\n self.eq(3, mque.offset('woot'))\n\n # Replace one item in the queue\n await mque.sets('woot', 1, ('lol',))\n self.eq(3, mque.size('woot'))\n self.eq(3, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (1, 'lol'), (2, 'hoho'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Replace multiple items in the queue\n await mque.sets('woot', 1, ('lol2', 'lol3'))\n self.eq(3, mque.size('woot'))\n self.eq(3, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (1, 'lol2'), (2, 'lol3'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Replace items going past the end of the current queue\n await mque.sets('woot', 2, ('lol4', 'lol5', 'lol6'))\n self.eq(5, mque.size('woot'))\n self.eq(5, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (1, 'lol2'), (2, 'lol4'), (3, 'lol5'), (4, 'lol6'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Delete from the middle of the queue\n await mque.dele('woot', 1, 3)\n self.eq(2, mque.size('woot'))\n self.eq(5, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (4, 'lol6'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Add items in the gap we created\n await mque.sets('woot', 2, ('lol7', 'lol8'))\n self.eq(4, mque.size('woot'))\n self.eq(5, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (2, 'lol7'), (3, 'lol8'), (4, 'lol6'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Delete a partially empty range\n await mque.dele('woot', 3, 10)\n self.eq(2, mque.size('woot'))\n self.eq(5, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (2, 'lol7'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Delete a completely empty range\n await mque.dele('woot', 100, 150)\n self.eq(2, mque.size('woot'))\n self.eq(5, mque.offset('woot'))\n\n 
correct = ((0, 'hehe'), (2, 'lol7'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Set items past the end of the current queue\n await mque.sets('woot', 200, ('lol9', 'lol0'))\n self.eq(4, mque.size('woot'))\n self.eq(202, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (2, 'lol7'), (200, 'lol9'), (201, 'lol0'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n # Adding items past the current end of queue should wake waiters\n data = []\n\n async def getswait():\n async for item in mque.gets('woot', 0, wait=True):\n\n data.append(item)\n\n if item[1] == 'lol0':\n break\n\n task = slab.schedCoro(getswait())\n\n await mque.sets('woot', 201, ('lol9', 'lol0'))\n await asyncio.wait_for(task, 2)\n\n self.eq(5, mque.size('woot'))\n self.eq(203, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (2, 'lol7'), (200, 'lol9'), (201, 'lol9'), (202, 'lol0'))\n self.eq(data, correct)\n\n # Invalid offsets that won't do anything\n await mque.dele('woot', -1, 20)\n await mque.dele('woot', -5, -1)\n await mque.dele('woot', 5, 1)\n await mque.dele('woot', 5, -1)\n await mque.sets('woot', -1, ('lolz', 'lol'))\n\n self.eq(5, mque.size('woot'))\n self.eq(203, mque.offset('woot'))\n\n correct = ((0, 'hehe'), (2, 'lol7'), (200, 'lol9'), (201, 'lol9'), (202, 'lol0'))\n self.eq(correct, [x async for x in mque.gets('woot', 0)])\n\n async def test_slababrv(self):\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n abrv = s_lmdbslab.SlabAbrv(slab, 'test')\n\n valu = abrv.setBytsToAbrv('hehe'.encode())\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n valu = abrv.setBytsToAbrv('haha'.encode())\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01')\n\n name = abrv.abrvToByts(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01')\n self.eq(name, b'haha')\n\n self.raises(s_exc.NoSuchAbrv, abrv.abrvToByts, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02')\n\n # And persistence\n async with await s_lmdbslab.Slab.anit(path) as slab:\n abrv = s_lmdbslab.SlabAbrv(slab, 'test')\n # recall first\n name = abrv.abrvToByts(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n self.eq(name, b'hehe')\n\n name = abrv.abrvToByts(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01')\n self.eq(name, b'haha')\n # Remaking them makes the values we already had\n valu = abrv.nameToAbrv('hehe')\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n\n valu = abrv.nameToAbrv('haha')\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01')\n\n self.eq('haha', abrv.abrvToName(b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'))\n\n # And we still have no valu for 02\n self.raises(s_exc.NoSuchAbrv, abrv.abrvToByts, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02')\n\n # And we don't overwrite existing values on restart\n valu = abrv.setBytsToAbrv('hoho'.encode())\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02')\n\n valu = abrv.nameToAbrv('haha')\n self.eq(valu, b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01')\n\n async def test_lmdbslab_hotkeyval(self):\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path, map_size=1000000) as slab, \\\n await s_lmdbslab.HotKeyVal.anit(slab, 'counts') as ctr:\n self.eq(None, ctr.get('foo'))\n self.eq({}, ctr.pack())\n ctr.set('foo', 1)\n ctr.set('bar', {'val': 42})\n self.eq({'foo': 1, 'bar': {'val': 42}}, ctr.pack())\n ctr.set('baz', 42)\n ctr.delete('baz')\n self.eq(None, ctr.get('baz'))\n\n async with await 
s_lmdbslab.Slab.anit(path, map_size=1000000) as slab, \\\n await s_lmdbslab.HotKeyVal.anit(slab, 'counts') as ctr:\n self.eq({'foo': 1, 'bar': {'val': 42}}, ctr.pack())\n self.eq({'val': 42}, ctr.get('bar'))\n\n async def test_lmdbslab_hotcount(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path, map_size=1000000, lockmemory=True) as slab, \\\n await s_lmdbslab.HotCount.anit(slab, 'counts') as ctr:\n self.eq(0, ctr.get('foo'))\n self.eq({}, ctr.pack())\n ctr.inc('foo')\n self.eq({'foo': 1}, ctr.pack())\n self.eq(1, ctr.get('foo'))\n ctr.set('bar', 42)\n self.eq({'foo': 1, 'bar': 42}, ctr.pack())\n ctr.sync()\n self.eq({'foo': 1, 'bar': 42}, ctr.pack())\n\n ctr.inc('foo')\n ctr.inc('foo')\n ctr.set('bar', 37)\n ctr.set('bar', -42)\n ctr.sync()\n\n cache = []\n for lkey, lval in slab.scanByFull(db='counts'):\n cache.append((lkey, s_common.int64un(lval)))\n\n self.len(1, [k for k, v in cache if k == b'foo'])\n self.len(1, [k for k, v in cache if k == b'bar'])\n\n async def test_lmdbslab_doubleopen(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path) as slab:\n foo = slab.initdb('foo')\n slab.put(b'\\x00\\x01', b'hehe', db=foo)\n\n # Can close and re-open fine\n async with await s_lmdbslab.Slab.anit(path) as slab:\n foo = slab.initdb('foo')\n self.eq(b'hehe', slab.get(b'\\x00\\x01', db=foo))\n\n # Can't re-open while already open\n await self.asyncraises(s_exc.SlabAlreadyOpen, s_lmdbslab.Slab.anit(path))\n\n async def test_lmdbslab_copyslab(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n copypath = os.path.join(dirn, 'copy.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n foo = slab.initdb('foo')\n slab.put(b'\\x00\\x01', b'hehe', db=foo)\n\n await slab.copyslab(copypath)\n\n self.true(pathlib.Path(copypath).with_suffix('.opts.yaml').exists())\n\n async with await s_lmdbslab.Slab.anit(copypath) as slabcopy:\n foo = slabcopy.initdb('foo')\n self.eq(b'hehe', slabcopy.get(b'\\x00\\x01', db=foo))\n\n await self.asyncraises(s_exc.DataAlreadyExists, slab.copyslab(copypath))\n\n async def test_lmdbslab_statinfo(self):\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n\n async with await s_lmdbslab.Slab.anit(path) as slab:\n\n foo = slab.initdb('foo')\n\n slab.put(b'\\x00\\x01', b'hehe', db=foo)\n slab.put(b'\\x00\\x02', b'haha', db=foo)\n await slab.sync()\n\n stats = slab.statinfo()\n\n self.false(stats['locking_memory'])\n self.false(stats['prefaulting'])\n\n commitstats = stats['commitstats']\n self.len(2, commitstats)\n self.eq(2, commitstats[-1][1])\n\n async def test_lmdbslab_iter_and_delete(self):\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=1000000, lockmemory=True) as slab:\n bar = slab.initdb('bar', dupsort=True)\n slab.put(b'\\x00\\x01', b'hehe', dupdata=True, db=bar)\n slab.put(b'\\x00\\x02', b'haha', dupdata=True, db=bar)\n scan = slab.scanByDups(b'\\x00\\x02', db=bar)\n self.eq((b'\\x00\\x02', b'haha'), next(scan))\n slab.delete(b'\\x00\\x01', b'hehe', db=bar)\n self.raises(StopIteration, next, scan)\n\n async def test_lmdbslab_hist(self):\n with self.getTestDir() as dirn:\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=1000000) as slab:\n now = s_common.now()\n hist = s_lmdbslab.Hist(slab, 'history')\n 
hist.add('foo')\n await asyncio.sleep(0.1)\n hist.add('bar')\n then = s_common.now()\n\n items = []\n for item in hist.carve(now, then):\n items.append(item)\n self.len(2, items)\n self.eq([item[1] for item in items], ['foo', 'bar'])\n\n # Carve from an arbitrary point forward to the end\n tick = items[1][0]\n await asyncio.sleep(0.1)\n hist.add('baz')\n items = []\n for item in hist.carve(tick):\n items.append(item)\n self.len(2, items)\n self.eq([item[1] for item in items], ['bar', 'baz'])\n\n # Add a item at given tick and carve it\n hist.add('timewarp', tick=now - 10)\n\n items = []\n for item in hist.carve(now - 15, then):\n items.append(item)\n self.len(3, items)\n self.eq([item[1] for item in items], ['timewarp', 'foo', 'bar'])\n\n # boundary conditions\n\n # Minimum values\n hist.add('bot', tick=0)\n with self.raises(OverflowError):\n hist.add('bot', tick=-1)\n\n # Maximum value we can store\n hist.add('eot', tick=(2 * 9223372036854775807) + 1)\n with self.raises(OverflowError):\n hist.add('eot', tick=(2 * 9223372036854775807) + 2)\n\n # Tablescan\n items = []\n for item in hist.carve(0):\n items.append(item[1])\n self.len(6, items)\n self.eq(items, ['bot', 'timewarp', 'foo', 'bar', 'baz', 'eot'])\n\n\nclass LmdbSlabMemLockTest(s_t_utils.SynTest):\n\n async def test_lmdbslabmemlock(self):\n self.thisHostMust(hasmemlocking=True)\n\n beforelockmem = s_thisplat.getCurrentLockedMemory()\n\n with self.getTestDir() as dirn:\n\n path = os.path.join(dirn, 'test.lmdb')\n async with await s_lmdbslab.Slab.anit(path, map_size=1000000, lockmemory=True) as lmdbslab:\n\n self.true(await asyncio.wait_for(lmdbslab.lockdoneevent.wait(), 8))\n lockmem = s_thisplat.getCurrentLockedMemory()\n self.ge(lockmem - beforelockmem, 4000)\n\n async def test_multiple_grow(self):\n '''\n Trigger multiple grow events rapidly and ensure memlock thread survives.\n '''\n self.thisHostMust(hasmemlocking=True)\n\n with self.getTestDir() as dirn:\n\n count = 0\n byts = b'\\x00' * 1024\n path = os.path.join(dirn, 'test.lmdb')\n mapsize = 10 * 1024 * 1024\n async with await s_lmdbslab.Slab.anit(path, map_size=mapsize, growsize=5000, lockmemory=True) as slab:\n foo = slab.initdb('foo')\n while count < 8000:\n count += 1\n slab.put(s_common.guid(count).encode('utf8'), s_common.guid(count).encode('utf8') + byts, db=foo)\n\n self.true(await asyncio.wait_for(slab.lockdoneevent.wait(), 8))\n\n lockmem = s_thisplat.getCurrentLockedMemory()\n\n # TODO: make this test reliable\n self.ge(lockmem, 0)\n\n async def test_math(self):\n self.eq(16, s_lmdbslab._florpo2(16))\n self.eq(16, s_lmdbslab._florpo2(17))\n self.eq(16, s_lmdbslab._florpo2(31))\n\n self.eq(16, s_lmdbslab._ceilpo2(16))\n self.eq(16, s_lmdbslab._ceilpo2(15))\n self.eq(16, s_lmdbslab._ceilpo2(9))\n\n self.eq(4, s_lmdbslab._roundup(4, 2))\n self.eq(4, s_lmdbslab._roundup(3, 2))\n\ndef _writeproc(path):\n\n async def lotsofwrites(path):\n byts = b'\\x00' * 256\n os.remove(pathlib.Path(path).with_suffix('.opts.yaml'))\n async with await s_lmdbslab.Slab.anit(path, map_size=100000) as slab:\n foo = slab.initdb('foo', dupsort=True)\n mapsize = slab.mapsize\n count = 0\n while mapsize == slab.mapsize:\n count += 1\n slab.put(b'abcd', s_common.guid(count).encode('utf8') + byts, dupdata=True, db=foo)\n asyncio.run(lotsofwrites(path))\n", "sub_path": "synapse/tests/test_lib_lmdbslab.py", "file_name": "test_lib_lmdbslab.py", "file_ext": "py", "file_size_in_byte": 59604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": 
[{"api_name": "os.getpid", "line_number": 22, "usage_type": "call"}, {"api_name": "synapse.tests.utils.SynTest", "line_number": 30, "usage_type": "attribute"}, {"api_name": "synapse.tests.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "synapse.tests.utils.SynTest.__init__", "line_number": 33, "usage_type": "call"}, {"api_name": "synapse.tests.utils.SynTest", "line_number": 33, "usage_type": "attribute"}, {"api_name": "synapse.tests.utils", "line_number": 33, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 40, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 40, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 40, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.syncLoopOnce", "line_number": 67, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 67, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 67, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.syncLoopOnce", "line_number": 86, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 86, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 86, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "synapse.exc.BadArg", "line_number": 104, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 104, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 104, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 104, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 104, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 106, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 106, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 106, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 111, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.ScanBack", "line_number": 228, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 228, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.ScanBack", "line_number": 235, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 235, "usage_type": "name"}, {"api_name": "synapse.exc.DataAlreadyExists", "line_number": 284, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 284, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path", "line_number": 297, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 298, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 298, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 298, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 299, "usage_type": "call"}, {"api_name": "synapse.exc.IsFini", "line_number": 320, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 320, "usage_type": "name"}, {"api_name": 
"synapse.exc.IsFini", "line_number": 321, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 321, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.getSlabsInDir", "line_number": 323, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 323, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 323, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 328, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 328, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 328, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 337, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 341, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 341, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 341, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 345, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 345, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 356, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 357, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 357, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 357, "usage_type": "name"}, {"api_name": "synapse.lib.base.Waiter", "line_number": 361, "usage_type": "call"}, {"api_name": "synapse.lib.base", "line_number": 361, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 364, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 364, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 374, "usage_type": "call"}, {"api_name": "os.path", "line_number": 374, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 377, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 377, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 377, "usage_type": "name"}, {"api_name": "synapse.exc.DbOutOfSpace", "line_number": 382, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 382, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 385, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 385, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 388, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 388, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 388, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path", "line_number": 396, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 398, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 398, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 398, "usage_type": 
"name"}, {"api_name": "synapse.common.guid", "line_number": 404, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 404, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 408, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 408, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 409, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 409, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 439, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 439, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 440, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 440, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 451, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 451, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 452, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 452, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 458, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 458, "usage_type": "name"}, {"api_name": "synapse.common.int64en", "line_number": 459, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 459, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 484, "usage_type": "call"}, {"api_name": "os.path", "line_number": 484, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 486, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 486, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 486, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 655, "usage_type": "call"}, {"api_name": "os.path", "line_number": 655, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 656, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 656, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 656, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 663, "usage_type": "call"}, {"api_name": "os.path", "line_number": 663, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 665, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 665, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 665, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 672, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 672, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 673, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 673, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 694, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 694, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 701, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 701, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 707, "usage_type": "call"}, {"api_name": "os.path", "line_number": 707, "usage_type": "attribute"}, {"api_name": "synapse.common.guid", "line_number": 722, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 722, "usage_type": "name"}, {"api_name": 
"synapse.common.guid", "line_number": 724, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 724, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 733, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 733, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 733, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 742, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 742, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 743, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 743, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 744, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 744, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 745, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 745, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 746, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 746, "usage_type": "name"}, {"api_name": "synapse.exc.IsReadOnly", "line_number": 747, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 747, "usage_type": "name"}, {"api_name": "multiprocessing.get_context", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 768, "usage_type": "call"}, {"api_name": "os.path", "line_number": 768, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 771, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 771, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 771, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 796, "usage_type": "call"}, {"api_name": "os.path", "line_number": 796, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 799, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 799, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 799, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 804, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 804, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 817, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 817, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 822, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 822, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 830, "usage_type": "call"}, {"api_name": "os.path", "line_number": 830, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 831, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 831, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 831, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.GuidStor", "line_number": 832, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 832, "usage_type": "name"}, {"api_name": "synapse.common.novalu", "line_number": 846, "usage_type": "attribute"}, {"api_name": "synapse.common", "line_number": 846, "usage_type": "name"}, {"api_name": "synapse.exc.NotMsgpackSafe", "line_number": 849, "usage_type": "attribute"}, {"api_name": "synapse.exc", 
"line_number": 849, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 851, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 851, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 851, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.GuidStor", "line_number": 852, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 852, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 863, "usage_type": "call"}, {"api_name": "os.path", "line_number": 863, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 864, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 864, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 864, "usage_type": "name"}, {"api_name": "asyncio.wait_for", "line_number": 865, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 874, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 880, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 880, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 881, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 881, "usage_type": "name"}, {"api_name": "synapse.lib.const.mebibyte", "line_number": 881, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 881, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 882, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 882, "usage_type": "name"}, {"api_name": "synapse.lib.const.mebibyte", "line_number": 882, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 882, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 883, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 883, "usage_type": "name"}, {"api_name": "synapse.lib.const.gibibyte", "line_number": 883, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 883, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 884, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 884, "usage_type": "name"}, {"api_name": "synapse.lib.const.gibibyte", "line_number": 884, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 884, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._mapsizeround", "line_number": 885, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 885, "usage_type": "name"}, {"api_name": "synapse.lib.const.gibibyte", "line_number": 885, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 885, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 893, "usage_type": "call"}, {"api_name": "os.path", "line_number": 893, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 897, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 897, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 897, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 899, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 899, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 900, "usage_type": "call"}, {"api_name": 
"synapse.common.guid", "line_number": 902, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 902, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 911, "usage_type": "call"}, {"api_name": "synapse.lib.const.mebibyte", "line_number": 911, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 911, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 912, "usage_type": "call"}, {"api_name": "synapse.lib.const.kibibyte", "line_number": 912, "usage_type": "attribute"}, {"api_name": "synapse.lib.const", "line_number": 912, "usage_type": "name"}, {"api_name": "synapse.tests.utils.alist", "line_number": 920, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 922, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 933, "usage_type": "call"}, {"api_name": "os.path", "line_number": 933, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 936, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 936, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 936, "usage_type": "name"}, {"api_name": "synapse.glob.iAmLoop", "line_number": 952, "usage_type": "call"}, {"api_name": "synapse.glob", "line_number": 952, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 954, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 954, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 954, "usage_type": "name"}, {"api_name": "asyncio.run", "line_number": 960, "usage_type": "call"}, {"api_name": "multiprocessing.get_context", "line_number": 966, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 968, "usage_type": "call"}, {"api_name": "os.path", "line_number": 968, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 969, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 969, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 969, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 971, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 971, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 971, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 984, "usage_type": "call"}, {"api_name": "os.path", "line_number": 984, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 986, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 986, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 986, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 992, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 992, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 995, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 995, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 998, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 998, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 1001, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1001, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 1004, "usage_type": 
"attribute"}, {"api_name": "synapse.exc", "line_number": 1004, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1007, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchName", "line_number": 1010, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1010, "usage_type": "name"}, {"api_name": "synapse.exc.DupName", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1014, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1051, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1051, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1051, "usage_type": "name"}, {"api_name": "asyncio.Event", "line_number": 1064, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 1079, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 1086, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 1186, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1210, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1212, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1212, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1212, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.SlabAbrv", "line_number": 1213, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1213, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchAbrv", "line_number": 1223, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1223, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1226, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1226, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1226, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.SlabAbrv", "line_number": 1227, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1227, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchAbrv", "line_number": 1244, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1244, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1256, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1258, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1258, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1258, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.HotKeyVal.anit", "line_number": 1259, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.HotKeyVal", "line_number": 1259, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1259, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1269, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1269, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1269, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.HotKeyVal.anit", "line_number": 1270, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.HotKeyVal", 
"line_number": 1270, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1270, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1278, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1278, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1280, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1280, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1280, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.HotCount.anit", "line_number": 1281, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.HotCount", "line_number": 1281, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1281, "usage_type": "name"}, {"api_name": "synapse.common.int64un", "line_number": 1300, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 1300, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1310, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1310, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1310, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1315, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1315, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1315, "usage_type": "name"}, {"api_name": "synapse.exc.SlabAlreadyOpen", "line_number": 1320, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1320, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1320, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1320, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1320, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1326, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1327, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1329, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1329, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1329, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 1335, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1337, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1337, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1337, "usage_type": "name"}, {"api_name": "synapse.exc.DataAlreadyExists", "line_number": 1341, "usage_type": "attribute"}, {"api_name": "synapse.exc", "line_number": 1341, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1347, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1347, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1349, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1349, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1349, "usage_type": "name"}, {"api_name": 
"os.path.join", "line_number": 1368, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1368, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1369, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1369, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1369, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1380, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1380, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1381, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1381, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1381, "usage_type": "name"}, {"api_name": "synapse.common.now", "line_number": 1382, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 1382, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab.Hist", "line_number": 1383, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1383, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 1385, "usage_type": "call"}, {"api_name": "synapse.common.now", "line_number": 1387, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 1387, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 1397, "usage_type": "call"}, {"api_name": "synapse.tests.utils.SynTest", "line_number": 1434, "usage_type": "attribute"}, {"api_name": "synapse.tests.utils", "line_number": 1434, "usage_type": "name"}, {"api_name": "synapse.lib.thisplat.getCurrentLockedMemory", "line_number": 1439, "usage_type": "call"}, {"api_name": "synapse.lib.thisplat", "line_number": 1439, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1443, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1443, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1444, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1444, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1444, "usage_type": "name"}, {"api_name": "asyncio.wait_for", "line_number": 1446, "usage_type": "call"}, {"api_name": "synapse.lib.thisplat.getCurrentLockedMemory", "line_number": 1447, "usage_type": "call"}, {"api_name": "synapse.lib.thisplat", "line_number": 1447, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1460, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1460, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1462, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1462, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1462, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 1466, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 1466, "usage_type": "name"}, {"api_name": "asyncio.wait_for", "line_number": 1468, "usage_type": "call"}, {"api_name": "synapse.lib.thisplat.getCurrentLockedMemory", "line_number": 1470, "usage_type": "call"}, {"api_name": "synapse.lib.thisplat", "line_number": 1470, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._florpo2", "line_number": 1476, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1476, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._florpo2", "line_number": 1477, "usage_type": "call"}, 
{"api_name": "synapse.lib.lmdbslab", "line_number": 1477, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._florpo2", "line_number": 1478, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1478, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._ceilpo2", "line_number": 1480, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1480, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._ceilpo2", "line_number": 1481, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1481, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._ceilpo2", "line_number": 1482, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1482, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._roundup", "line_number": 1484, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1484, "usage_type": "name"}, {"api_name": "synapse.lib.lmdbslab._roundup", "line_number": 1485, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1485, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1491, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 1491, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab.anit", "line_number": 1492, "usage_type": "call"}, {"api_name": "synapse.lib.lmdbslab.Slab", "line_number": 1492, "usage_type": "attribute"}, {"api_name": "synapse.lib.lmdbslab", "line_number": 1492, "usage_type": "name"}, {"api_name": "synapse.common.guid", "line_number": 1498, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 1498, "usage_type": "name"}, {"api_name": "asyncio.run", "line_number": 1499, "usage_type": "call"}]} +{"seq_id": "259604181", "text": "import numpy as np\nimport cv2\nimport scipy.misc\nimport skimage.draw as draw\nimport math\nfrom scipy import ndimage\n\n\nr_navigable_thresh, g_navigable_thresh, b_navigable_thresh = 120, 60, 120\ndst_size = 5\nbottom_offset = 6\nimg_shape = (160, 320, 3)\nscale = 2 * dst_size \nworld_size = 200 \nsource_points = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\ndestination_points = np.float32([[img_shape[1]/2 - dst_size, img_shape[0] - bottom_offset],\n [img_shape[1]/2 + dst_size, img_shape[0] - bottom_offset],\n [img_shape[1]/2 + dst_size, img_shape[0] - 2*dst_size - bottom_offset], \n [img_shape[1]/2 - dst_size, img_shape[0] - 2*dst_size - bottom_offset],\n ])\n\ndef perception_step(Rover): \n\n warped_img = perspect_transform(Rover.img, source_points, destination_points)\n img = color_thresh(warped_img, rgb_thresh=(r_navigable_thresh, g_navigable_thresh, b_navigable_thresh), rgb_shape=True) * 105.0\n\n # Get threshold images in both binary and rgb\n rgb_threshed_img = color_thresh(warped_img, rgb_thresh=(r_navigable_thresh, g_navigable_thresh, b_navigable_thresh), rgb_shape=True) * 105.0\n binary_threshed_img = color_thresh(warped_img, rgb_thresh=(r_navigable_thresh, g_navigable_thresh, b_navigable_thresh))\n\n # Navigable points\n navigable_rover_x, navigable_rover_y = rover_coords(binary_threshed_img)\n navigable_world_x, navigable_world_y = pix_to_world(navigable_rover_x, navigable_rover_y, Rover.pos[0], Rover.pos[1], Rover.yaw, world_size, scale)\n Rover.worldmap[navigable_world_y, navigable_world_x, 2] += 1 \n\n # Obstacle points\n obstacle_rover_x, obstacle_rover_y = rover_coords(np.invert(binary_threshed_img))\n obstacle_world_x, obstacle_world_y = pix_to_world(obstacle_rover_x, obstacle_rover_y, Rover.pos[0], Rover.pos[1], 
Rover.yaw, world_size, scale)\n Rover.worldmap[obstacle_world_y, obstacle_world_x, 0] += 0.5\n # Removes a bit of navigable\n Rover.worldmap[obstacle_world_y, obstacle_world_x, 2] -= 0.3\n\n # Calculate coordinates\n distances, angles = to_polar_coords(navigable_rover_x, navigable_rover_y)\n avg_angle = np.mean(angles) \n\n # Calculate angles\n Rover.nav_dists = distances**2\n Rover.nav_angles = angles\n \n steer_angle = np.clip(np.mean(Rover.nav_angles * 180/np.pi), -15, 15)\n navigable_distance = np.mean(distances)\n direction_line_angle = math.radians(steer_angle) - math.pi/2 \n \n draw_direction_line(img, direction_line_angle, navigable_distance)\n\n Rover.vision_image = img \n return Rover\n\ndef draw_direction_line(img, angle, distance):\n # Calculates the final x,y point \n line_length = 160 - distance \n trans_x = 159 + line_length * math.cos(angle) * -1\n trans_y = line_length * math.sin(angle) * -1\n \n # Do nothing if calculations failed\n if np.isnan(trans_x) or np.isnan(trans_y):\n return\n\n rr, cc, val = draw.line_aa(159, 160, np.int(trans_y), np.int(trans_x))\n img[rr, cc, 0] = val * 250\n img[rr, cc, 1:2] = 0 \n\n\ndef process_rocks():\n pass\n # rock_thresholds = rock_threshold(warped_img) > 0\n # rock_center_of_mass = ndimage.measurements.center_of_mass(rock_thresholds) # TODO: handle multiple rocks \n\n # img[:, :, 0] = (rock_thresholds * 255.0)\n\n # Navigable rocks\n # rock_rover_coords_x, rock_rover_coords_y = rover_coords(rock_thresholds)\n # rock_x_world, rock_y_world = pix_to_world(rock_rover_coords_x, rock_rover_coords_y, Rover.pos[0], Rover.pos[1], Rover.yaw, 200, scale) \n\n # Draw circle on rocks \n # rr, cc = draw.circle(rock_center_of_mass[0], rock_center_of_mass[1], 5) \n # img[rr, cc, 2] = 200.0\n\n # # Calculate slope between Rover and rock\n # slope = (rock_center_of_mass[1] - 159) / (rock_center_of_mass[0] - 160) \n # angle_to_rock = math.tan(slope)\n # print('slope', slope, math.tan(slope), angle_to_rock)\n \n\ndef color_thresh(img, rgb_thresh=(160, 160, 160), rgb_shape=False):\n # Create an array of zeros same xy size as img, but single channel\n color_select = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] > rgb_thresh[2])\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 1\n \n # If this flag is on, returns an RGB dimension image, instead of single channel\n if rgb_shape:\n color_select = np.stack((color_select,)*3, axis=-1)\n \n # Return the binary image\n return color_select\n\n# Define a function to convert from image coords to rover coords\ndef rover_coords(binary_img):\n # Identify nonzero pixels\n ypos, xpos = binary_img.nonzero()\n # Calculate pixel positions with reference to the rover position being at the \n # center bottom of the image. 
\n x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)\n y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)\n return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n # Convert (x_pixel, y_pixel) to (distance, angle) \n # in polar coordinates in rover space\n # Calculate distance to each pixel\n dist = np.sqrt(x_pixel**2 + y_pixel**2)\n # Calculate angle away from vertical for each pixel\n angles = np.arctan2(y_pixel, x_pixel)\n return dist, angles\n\n# Define a function to map rover space pixels to world space\ndef rotate_pix(xpix, ypix, yaw):\n # Convert yaw to radians\n yaw_rad = yaw * np.pi / 180\n xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))\n \n ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))\n # Return the result \n return xpix_rotated, ypix_rotated\n\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n # Apply a scaling and a translation\n xpix_translated = (xpix_rot / scale) + xpos\n ypix_translated = (ypix_rot / scale) + ypos\n # Return the result \n return xpix_translated, ypix_translated\n\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n # Apply rotation\n xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n # Apply translation\n xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n # Perform rotation, translation and clipping all at once\n x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n # Return the result\n return x_pix_world, y_pix_world\n\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n \n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n \n return warped\n\ndef rock_threshold(img, rgb_thresh=(160, 140, 90), rgb_shape=False):\n\n color_select = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] < rgb_thresh[2]) # ATTENTION: reversed comparison for blue\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 1\n \n # If this flag is on, returns an RGB dimension image, instead of single channel\n if rgb_shape:\n color_select = np.stack((color_select,)*3, axis=-1)\n \n return color_select", "sub_path": "code/perception.py", "file_name": "perception.py", "file_ext": "py", "file_size_in_byte": 7837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.float32", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.invert", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 52, "usage_type": "call"}, 
{"api_name": "math.radians", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 63, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 67, "usage_type": "call"}, {"api_name": "skimage.draw.line_aa", "line_number": 70, "usage_type": "call"}, {"api_name": "skimage.draw", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.int", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 170, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "618089809", "text": "from __future__ import unicode_literals\n\nimport json\nfrom six.moves.urllib.parse import urlparse, parse_qs\n\nfrom moto.core.responses import _TemplateEnvironmentMixin\nfrom .models import glacier_backends\nfrom .utils import region_from_glacier_url, vault_from_glacier_url\n\n\nclass GlacierResponse(_TemplateEnvironmentMixin):\n def __init__(self, backend):\n super(GlacierResponse, self).__init__()\n self.backend = backend\n\n @classmethod\n def all_vault_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._all_vault_response(request, full_url, headers)\n\n def _all_vault_response(self, request, full_url, headers):\n vaults = self.backend.list_vaules()\n response = json.dumps(\n {\"Marker\": None, \"VaultList\": [vault.to_dict() for vault in vaults]}\n )\n\n headers[\"content-type\"] = \"application/json\"\n return 200, headers, response\n\n @classmethod\n def vault_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_response(request, full_url, headers)\n\n def _vault_response(self, request, full_url, headers):\n method = request.method\n parsed_url = urlparse(full_url)\n querystring = parse_qs(parsed_url.query, keep_blank_values=True)\n vault_name = vault_from_glacier_url(full_url)\n\n if method == \"GET\":\n return self._vault_response_get(vault_name, 
querystring, headers)\n elif method == \"PUT\":\n return self._vault_response_put(vault_name, querystring, headers)\n elif method == \"DELETE\":\n return self._vault_response_delete(vault_name, querystring, headers)\n\n def _vault_response_get(self, vault_name, querystring, headers):\n vault = self.backend.get_vault(vault_name)\n headers[\"content-type\"] = \"application/json\"\n return 200, headers, json.dumps(vault.to_dict())\n\n def _vault_response_put(self, vault_name, querystring, headers):\n self.backend.create_vault(vault_name)\n return 201, headers, \"\"\n\n def _vault_response_delete(self, vault_name, querystring, headers):\n self.backend.delete_vault(vault_name)\n return 204, headers, \"\"\n\n @classmethod\n def vault_archive_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_archive_response(request, full_url, headers)\n\n def _vault_archive_response(self, request, full_url, headers):\n method = request.method\n if hasattr(request, \"body\"):\n body = request.body\n else:\n body = request.data\n description = \"\"\n if \"x-amz-archive-description\" in request.headers:\n description = request.headers[\"x-amz-archive-description\"]\n parsed_url = urlparse(full_url)\n querystring = parse_qs(parsed_url.query, keep_blank_values=True)\n vault_name = full_url.split(\"/\")[-2]\n\n if method == \"POST\":\n return self._vault_archive_response_post(\n vault_name, body, description, querystring, headers\n )\n else:\n return 400, headers, \"400 Bad Request\"\n\n def _vault_archive_response_post(\n self, vault_name, body, description, querystring, headers\n ):\n vault = self.backend.get_vault(vault_name)\n vault_id = vault.create_archive(body, description)\n headers[\"x-amz-archive-id\"] = vault_id\n return 201, headers, \"\"\n\n @classmethod\n def vault_archive_individual_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_archive_individual_response(\n request, full_url, headers\n )\n\n def _vault_archive_individual_response(self, request, full_url, headers):\n method = request.method\n vault_name = full_url.split(\"/\")[-3]\n archive_id = full_url.split(\"/\")[-1]\n\n if method == \"DELETE\":\n vault = self.backend.get_vault(vault_name)\n vault.delete_archive(archive_id)\n return 204, headers, \"\"\n\n @classmethod\n def vault_jobs_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_jobs_response(request, full_url, headers)\n\n def _vault_jobs_response(self, request, full_url, headers):\n method = request.method\n if hasattr(request, \"body\"):\n body = request.body\n else:\n body = request.data\n account_id = full_url.split(\"/\")[1]\n vault_name = full_url.split(\"/\")[-2]\n\n if method == \"GET\":\n jobs = self.backend.list_jobs(vault_name)\n headers[\"content-type\"] = \"application/json\"\n return (\n 200,\n headers,\n json.dumps(\n {\"JobList\": [job.to_dict() for job in jobs], \"Marker\": None}\n ),\n )\n elif method == \"POST\":\n json_body = json.loads(body.decode(\"utf-8\"))\n job_type = json_body[\"Type\"]\n archive_id = None\n if \"ArchiveId\" in json_body:\n archive_id = json_body[\"ArchiveId\"]\n if \"Tier\" in json_body:\n tier = 
json_body[\"Tier\"]\n else:\n tier = \"Standard\"\n job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id)\n headers[\"x-amz-job-id\"] = job_id\n headers[\"Location\"] = \"/{0}/vaults/{1}/jobs/{2}\".format(\n account_id, vault_name, job_id\n )\n return 202, headers, \"\"\n\n @classmethod\n def vault_jobs_individual_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_jobs_individual_response(\n request, full_url, headers\n )\n\n def _vault_jobs_individual_response(self, request, full_url, headers):\n vault_name = full_url.split(\"/\")[-3]\n archive_id = full_url.split(\"/\")[-1]\n\n vault = self.backend.get_vault(vault_name)\n job = vault.describe_job(archive_id)\n return 200, headers, json.dumps(job.to_dict())\n\n @classmethod\n def vault_jobs_output_response(clazz, request, full_url, headers):\n region_name = region_from_glacier_url(full_url)\n response_instance = GlacierResponse(glacier_backends[region_name])\n return response_instance._vault_jobs_output_response(request, full_url, headers)\n\n def _vault_jobs_output_response(self, request, full_url, headers):\n vault_name = full_url.split(\"/\")[-4]\n job_id = full_url.split(\"/\")[-2]\n vault = self.backend.get_vault(vault_name)\n if vault.job_ready(job_id):\n output = vault.get_job_output(job_id)\n if isinstance(output, dict):\n headers[\"content-type\"] = \"application/json\"\n return 200, headers, json.dumps(output)\n else:\n headers[\"content-type\"] = \"application/octet-stream\"\n return 200, headers, output\n else:\n return 404, headers, \"404 Not Found\"\n", "sub_path": "moto/glacier/responses.py", "file_name": "responses.py", "file_ext": "py", "file_size_in_byte": 7764, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "moto.core.responses._TemplateEnvironmentMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.region_from_glacier_url", "line_number": 18, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 19, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.region_from_glacier_url", "line_number": 33, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 34, "usage_type": "name"}, {"api_name": "six.moves.urllib.parse.urlparse", "line_number": 39, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.parse_qs", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.vault_from_glacier_url", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.region_from_glacier_url", "line_number": 65, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 66, "usage_type": "name"}, {"api_name": "six.moves.urllib.parse.urlparse", "line_number": 78, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.parse_qs", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.region_from_glacier_url", "line_number": 99, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 100, "usage_type": "name"}, {"api_name": "utils.region_from_glacier_url", "line_number": 117, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 118, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 136, "usage_type": "call"}, 
{"api_name": "json.loads", "line_number": 141, "usage_type": "call"}, {"api_name": "utils.region_from_glacier_url", "line_number": 159, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 160, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.region_from_glacier_url", "line_number": 175, "usage_type": "call"}, {"api_name": "models.glacier_backends", "line_number": 176, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "545314427", "text": "import random\nimport time\nfrom decimal import *\nimport numpy\nfrom datetime import datetime\nfrom openpyxl import load_workbook\nfrom DMN import *\nimport matplotlib.pyplot as plt\n\n\ndef calc_pos(ind, col_count):\n ind = ind\n row = ind / col_count\n col = ind % col_count\n return {'row': row, 'col': col}\n\n\ndef main():\n index = 0\n array = random.randint(0, 100)\n print(array)\n wb3 = load_workbook(filename=\"data/mirna_drug.xlsx\", read_only=True)\n ws3 = wb3[\"Sheet1\"]\n y_row_count = ws3.max_row - 1\n y_col_count = ws3.max_column - 1\n\n y_matrix = numpy.zeros((y_row_count, y_col_count))\n y_matrix_t = numpy.transpose(y_matrix)\n x_count = 0\n i = 0\n for row in ws3.rows:\n if (i > 0): # skip first row\n for j in range(y_col_count):\n j += 1 # skip first column\n if (row[j].value):\n y_matrix[i - 1][j - 1] = row[j].value # (i-1) and (j-1) indexes used for skipped first row and col\n i += 1\n\n wb = load_workbook(filename=\"data/mirna_sim.xlsx\", read_only=True)\n ws = wb[\"Sheet1\"]\n sm_row_count = ws.max_row - 1\n\n sm_matrix = numpy.zeros((sm_row_count, sm_row_count))\n dm_matrix = numpy.zeros((sm_row_count, sm_row_count))\n x_count = 0\n i = 0\n for row in ws.rows:\n if (i > 0): # skip first row\n for j in range(sm_row_count):\n j += 1 # skip first column\n if (row[j].value):\n sm_matrix[i - 1][j - 1] = row[j].value # (i-1) and (j-1) indexes used for skipped first row and col\n i += 1\n\n for x in range(sm_row_count):\n sm_x = sm_matrix[x]\n for y in range(sm_row_count):\n x_count += sm_x[y]\n dm_matrix[x][x] = Decimal(x_count)\n x_count = 0\n\n wb2 = load_workbook(filename=\"data/drug_sim.xlsx\", read_only=True)\n ws2 = wb2[\"Sheet1\"]\n su_row_count = ws2.max_row - 1\n\n su_matrix = numpy.zeros((su_row_count, su_row_count))\n du_matrix = numpy.zeros((su_row_count, su_row_count))\n x_count = 0\n i = 0\n for row in ws2.rows:\n if (i > 0): # skip first row\n for j in range(su_row_count):\n j += 1 # skip first column\n if (row[j].value):\n su_matrix[i - 1][j - 1] = row[j].value # (i-1) and (j-1) indexes used for skipped first row and col\n i += 1\n for x in range(su_row_count):\n su_x = su_matrix[x]\n for y in range(su_row_count):\n x_count += su_x[y]\n du_matrix[x][x] = Decimal(x_count)\n x_count = 0\n\n eta_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n thr_list = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17,\n 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34,\n 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51,\n 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68,\n 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85,\n 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]\n K_list = [1,2,3,4,5,6,7,8,9,10]\n 
k_list = [50,100,150]\n lambda_l_list = [0.25,0.5,1,2] \n lambda_m_list = [0,0.0001,0.001,0.01,0.1]\n cv_average = []\n #### begin of cross validation folding\n fold_count = 10\n\n one_element_array = DMN_N()\n one_element_array = list(map(int, one_element_array))\n # one_element_array = list(range(0, y_row_count * y_col_count))\n random.shuffle(one_element_array)\n random_indexes = numpy.array(one_element_array)\n\n folds_index = numpy.array_split(random_indexes, fold_count)\n\n cross_validation_result_file = open(\n \"cross_validation_by_element_fix_thr_result_\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".csv\", \"w\")\n cross_validation_result_file.write(\n 'fold_number,K,eta,k,lambda_l,lambda_m,thr,TP,TN,FP,FN,MCC,ACC,Sensitivity(TPR),Specificity(TNR),Precision(PPV),NPV,FNR,FPR,FDR,FOR,TS,F1,Informedness,Markedness\\n')\n TP = float(1)\n FN = float(1)\n TN = float(1)\n FP = float(1)\n cross_validation_average_file = open(\n \"cross_validation_by_element_Average_\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".csv\", \"w\")\n cross_validation_average_file.write(\n 'K,eta,k,lambda_l,lambda_m,thr,MCC,ACC,Sensitivity(TPR),Specificity(TNR),Precision(PPV),NPV,FNR,FPR,FDR,FOR,TS,F1,Informedness,Markedness,auc,auc_sum\\n')\n tp_sum = 0\n tn_sum = 0\n fp_sum = 0\n fn_sum = 0\n thr_value = numpy.zeros((14, len(thr_list)))\n # Avg_TP = 0\n # Avg_TN = 0\n # Avg_FP = 0\n # Avg_FN = 0\n for f in range(fold_count):\n y_matrix_folded = numpy.copy(y_matrix)\n y_matrix_folded_none_zero = numpy.copy(y_matrix)\n for ind in folds_index[f]:\n result = calc_pos(ind, y_col_count)\n y_matrix_folded[int(result['row']), result['col']] = 0\n\n for K in K_list:\n for eta1 in eta_list:\n # Using neighbor information of drugs\n eta = eta1 ** (numpy.arange(0, K))\n y2_new1 = numpy.zeros_like(y_matrix_folded)\n empty_rows = numpy.where(~y_matrix_folded.any(axis=1))[0]\n empty_cols = numpy.where(~y_matrix_folded.any(axis=0))[0]\n for i in numpy.arange(0, max(numpy.shape(sm_matrix))):\n drug_sim = sm_matrix[i, :]\n drug_sim[i] = 0\n indices = numpy.arange(0, max(numpy.shape(sm_matrix))) # row or col\n drug_sim = numpy.delete(drug_sim, empty_rows)\n indices = numpy.delete(indices, empty_rows)\n indx = numpy.argsort(-drug_sim)\n indx = indx[0:K]\n indx = indices[indx]\n drug_sim = sm_matrix[i, :]\n\n aa = eta * drug_sim[indx]\n yy = y_matrix_folded[indx, :]\n bb = numpy.dot(aa, yy)\n cc = sum(drug_sim[indx])\n if (cc != 0):\n dd = bb / cc\n else:\n dd = 0\n y2_new1[i, :] = dd\n\t\t\t\t\n\t\t\t\t# Using neighbor information of microRNAs\n y2_new2 = numpy.zeros_like(y_matrix_folded)\n for j in numpy.arange(0, max(numpy.shape(su_matrix))):\n target_sim = numpy.array(su_matrix[j, :])\n target_sim[j] = 0\n indices = numpy.arange(0, max(numpy.shape(su_matrix)))\n target_sim = numpy.delete(target_sim, empty_cols)\n indices = numpy.delete(indices, empty_cols)\n indx = numpy.argsort(-target_sim)\n indx = indx[0:K]\n indx = indices[indx]\n target_sim = su_matrix[j, :]\n\n aa = numpy.transpose(eta * target_sim[indx])\n yy = y_matrix_folded[:, indx]\n bb = numpy.dot(yy, aa)\n cc = sum(target_sim[indx])\n if (cc != 0):\n dd = bb / cc\n else:\n dd = 0\n y2_new2[:, j] = dd\n\n y2_avg = (y2_new1 + y2_new2) / 2\n Y = numpy.maximum(y_matrix_folded, y2_avg)\n Y_t = numpy.transpose(Y)\n\n\n for k in k_list:\n for lambda_l in lambda_l_list:\n for lambda_m in lambda_m_list:\n W = numpy.random.uniform(low=0, high=1, size=(y_row_count, k))\n H = numpy.random.uniform(low=0, high=1, size=(y_col_count, k))\n counterw = 0\n counterh = 0\n w_cell_count = 0\n 
h_cell_count = 0\n flag = True\n iteratew = 0\n iterateh = 0\n lambda_m_sm = lambda_m * sm_matrix\n lambda_m_su = lambda_m * su_matrix\n lambda_m_dm = lambda_m * dm_matrix\n lambda_m_du = lambda_m * du_matrix\n while flag == True:\n W_old = numpy.copy(W)\n wn1 = numpy.matmul(Y, H)\n w_numerator = wn1 + numpy.matmul(lambda_m_sm, W)\n wd11 = numpy.matmul(W, numpy.transpose(H))\n w_denominator = (numpy.matmul(wd11, H)) + (lambda_l * W) + (\n numpy.matmul(lambda_m_dm, W))\n # H\n H_old = numpy.copy(H)\n hn1 = numpy.matmul(Y_t, W)\n hn2 = numpy.matmul(lambda_m_su, H)\n h_numerator = hn1 + hn2\n hd11 = numpy.matmul(H, numpy.transpose(W))\n h_denominator = (numpy.matmul(hd11, W)) + (lambda_l * H) + (\n numpy.matmul(lambda_m_du, H))\n\n for i in range(y_row_count):\n for j in range(k):\n wik = W[i][j]\n wik = wik * (w_numerator[i][j] / w_denominator[i][j])\n W[i][j] = wik\n w_difference = numpy.absolute(W - W_old)\n\n for i in range(y_col_count):\n for j in range(k):\n hjk = H[i][j]\n hjk = hjk * (h_numerator[i][j] / h_denominator[i][j])\n H[i][j] = hjk\n h_difference = numpy.absolute(H - H_old)\n for i in range(y_row_count):\n for j in range(k):\n counterw += 1\n if w_difference[i][j] <= 0.0001:\n w_cell_count += 1\n if w_cell_count == (y_row_count * k):\n iteratew = 1\n # print 'count-w=', counterw\n elif i == y_row_count:\n w_cell_count = 0\n for i in range(y_col_count):\n for j in range(k):\n counterh += 1\n if h_difference[i][j] <= 0.0001:\n h_cell_count += 1\n if h_cell_count == (y_col_count * k):\n iterateh = 1\n # print 'count-h=', counterh\n elif i == y_col_count:\n h_cell_count = 0\n if iteratew == 1 and iterateh == 1:\n if iterateh == 1:\n flag = False\n print(\"k = \", k, \",lambda_l = \", lambda_l, \",lambda_m = \", lambda_m,\n \" This is printed on: \", str(datetime.now()))\n YStar = numpy.matmul(W, numpy.transpose(H))\n YStar_array = numpy.array(YStar).flatten()\n numpy.savetxt(\n 'YStar(K=' + str(K) + ',eta=' + str(eta1) + ',k=' + str(k) + ',lambda_l=' + str(\n lambda_l) + ',lambda_m=' + str(lambda_m) + ').txt', YStar, '%0.5f')\n\n numpy.savetxt(\n 'YStar_array(K=' + str(K) + ',eta=' + str(eta1) + ',k=' + str(\n k) + ',lambda_l=' + str(\n lambda_l) + ',lambda_m=' + str(lambda_m) + ').txt', YStar_array, '%0.5f')\n\n count = 0\n for thr in thr_list:\n TP = float(1)\n TN = float(1)\n FN = float(1)\n FP = float(1)\n thr_none_zero = []\n thr_zero = []\n YStar_result = []\n test_result_zero = []\n test_result_none_zero = []\n for ind in folds_index[f]:\n result = calc_pos(ind, y_col_count)\n YStar_result.append(YStar[int(result['row']), result['col']])\n test_result_zero.append(y_matrix_folded[int(result['row']), result['col']])\n test_result_none_zero.append(\n y_matrix_folded_none_zero[int(result['row']), result['col']])\n\n positive_index = []\n negative_index = []\n index = 0\n for tnz in test_result_none_zero:\n if (tnz == 1):\n positive_index.append(index)\n else:\n negative_index.append(index)\n index = index + 1\n\n negative_index = random.sample(set(negative_index), len(positive_index))\n\n for tz_ind, tnz_ind in zip(negative_index, positive_index):\n if (test_result_none_zero[tnz_ind] == 1):\n if (YStar_result[tnz_ind] >= thr):\n TP += 1\n\n else:\n FN += 1\n\n else:\n if (YStar_result[tnz_ind] >= thr):\n FP += 1\n\n else:\n TN += 1\n\n if (test_result_none_zero[tz_ind] == 1):\n if (YStar_result[tz_ind] >= thr):\n TP += 1\n\n else:\n FN += 1\n\n else:\n if (YStar_result[tz_ind] >= thr):\n FP += 1\n\n else:\n TN += 1\n\n MCC = ((TP * TN) - (FP * FN)) / (\n numpy.sqrt((TP + FP) * 
(TP + FN) * (TN + FP) * (TN + FN)))\n ACC = (TP + TN) / (TP + TN + FP + FN)\n TPR = TP / (TP + FN)\n TNR = TN / (TN + FP)\n PPV = TP / (TP + FP)\n NPV = TN / (TN + FN)\n FNR = FN / (FN + TP)\n FPR = FP / (FP + TN)\n FDR = FP / (FP + TP)\n FOR = FN / (FN + TN)\n TS = TP / (TP + FN + FP)\n F1 = (2 * TP) / (2 * TP + FP + FN)\n BM = TPR + TNR - 1\n MK = PPV + NPV - 1\n\n cv_average.append([str(K) + ',' + str(eta1) + ',' + str(k) + ',' + str(\n lambda_l) + ',' + str(lambda_m) + ',' + str(thr), str(f), str(K), str(eta1), str(k),\n str(lambda_l), str(lambda_m), str(thr), str(MCC), str(ACC), str(TPR),\n str(TNR), str(PPV), str(NPV), str(FNR), str(FPR), str(FDR), str(FOR),\n str(TS), str(F1), str(BM), str(MK), str(0), str(0)])\n\n cross_validation_result_file.write(\n str(f) + \",\" +\n str(K) + \",\" +\n str(eta1) + \",\" +\n str(k) + \",\" +\n str(lambda_l) + \",\" +\n str(lambda_m) + \",\" +\n str(thr) + \",\" +\n str(TP) + \",\" +\n str(TN) + \",\" +\n str(FP) + \",\" +\n str(FN) + \",\" +\n str(MCC) + \",\" +\n str(ACC) + \",\" +\n str(TPR) + \",\" +\n str(TNR) + \",\" +\n str(PPV) + \",\" +\n str(NPV) + \",\" +\n str(FNR) + \",\" +\n str(FPR) + \",\" +\n str(FDR) + \",\" +\n str(FOR) + \",\" +\n str(TS) + \",\" +\n str(F1) + \",\" +\n str(BM) + \",\" +\n str(MK) + \"\\n\")\n\n # end of fold\n\n cv_average = sorted(cv_average)\n fpr_tpr_array = []\n fpr_tpr_array.append([1, 1])\n auc_array = []\n i = 1\n auc_sum = 0\n for x in range(len(thr_list) * len(K_list) * len(eta_list) * len(k_list) * len(lambda_l_list) * len(lambda_m_list)):\n sum_MCC = 0\n sum_ACC = 0\n sum_TPR = 0\n sum_TNR = 0\n sum_PPV = 0\n sum_NPV = 0\n sum_FNR = 0\n sum_FPR = 0\n sum_FDR = 0\n sum_FOR = 0\n sum_TS = 0\n sum_F1 = 0\n sum_BM = 0\n sum_MK = 0\n for y in range((x * fold_count), (x * fold_count) + 10):\n sum_MCC += float(cv_average[y][8])\n sum_ACC += float(cv_average[y][9])\n sum_TPR += float(cv_average[y][10])\n sum_TNR += float(cv_average[y][11])\n sum_PPV += float(cv_average[y][12])\n sum_NPV += float(cv_average[y][13])\n sum_FNR += float(cv_average[y][14])\n sum_FPR += float(cv_average[y][15])\n sum_FDR += float(cv_average[y][16])\n sum_FOR += float(cv_average[y][17])\n sum_TS += float(cv_average[y][18])\n sum_F1 += float(cv_average[y][19])\n sum_BM += float(cv_average[y][20])\n sum_MK += float(cv_average[y][21])\n average_MCC = sum_MCC / fold_count\n average_ACC = sum_ACC / fold_count\n average_TPR = sum_TPR / fold_count\n average_TNR = sum_TNR / fold_count\n average_PPV = sum_PPV / fold_count\n average_NPV = sum_NPV / fold_count\n average_FNR = sum_FNR / fold_count\n average_FPR = sum_FPR / fold_count\n average_FDR = sum_FDR / fold_count\n average_FOR = sum_FOR / fold_count\n average_TS = sum_TS / fold_count\n average_F1 = sum_F1 / fold_count\n average_BM = sum_BM / fold_count\n average_MK = sum_MK / fold_count\n\n fpr_tpr_array.append([average_FPR, average_TPR])\n fpr_tpr_i = fpr_tpr_array[i - 1]\n fpr_tpr_i_2 = fpr_tpr_array[i]\n auc_value = 0.5 * (fpr_tpr_i[0] - fpr_tpr_i_2[0]) * (fpr_tpr_i[1] + fpr_tpr_i_2[1])\n auc_array.append(auc_value)\n auc_sum += auc_value\n re = x % len(thr_list)\n i += 1\n\n # for i in range(len(thr_list)):\n cross_validation_average_file.write(\n str(cv_average[x * fold_count][2]) + \",\" +\n str(cv_average[x * fold_count][3]) + \",\" +\n str(cv_average[x * fold_count][4]) + \",\" +\n str(cv_average[x * fold_count][5]) + \",\" +\n str(cv_average[x * fold_count][6]) + \",\" +\n str(thr_list[re]) + \",\" +\n str(average_MCC) + \",\" +\n str(average_ACC) + \",\" +\n str(average_TPR) 
+ \",\" +\n str(average_TNR) + \",\" +\n str(average_PPV) + \",\" +\n str(average_NPV) + \",\" +\n str(average_FNR) + \",\" +\n str(average_FPR) + \",\" +\n str(average_FDR) + \",\" +\n str(average_FOR) + \",\" +\n str(average_TS) + \",\" +\n str(average_F1) + \",\" +\n str(average_BM) + \",\" +\n str(average_MK) + \",\" +\n str(auc_value) + \",\" +\n str(auc_sum) + \"\\n\")\n # print x\n\n cross_validation_result_file.close()\n cross_validation_average_file.close()\n fpt_tpr_array_t = numpy.transpose(fpr_tpr_array)\n plt.plot(fpt_tpr_array_t[1], fpt_tpr_array_t[0])\n plt.ylabel('Sensivity')\n plt.xlabel('FPR')\n\n plt.savefig(\n fname='K=' + str(K) + ',eta=' + str(eta1) + ',k=' + str(k) + ',lambda_l=' + str(lambda_l) + ',lambda_m=' + str(\n lambda_m) + ').png')\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "IPA/MDIPA.py", "file_name": "MDIPA.py", "file_ext": "py", "file_size_in_byte": 22097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 28, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 105, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 108, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 168, 
"usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 235, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 260, "usage_type": "name"}, {"api_name": "numpy.matmul", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 267, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 470, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 471, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 471, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 473, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 473, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 475, "usage_type": "name"}]} +{"seq_id": "90946889", "text": "# get data\n# create two articles on with punctuation and the 
other with any punctuations\n# count word frequency with the second type of article\n# find the most relevant sentences from 1 st article based on word frequency\n# use these sentences as summary by joining all these articles\n\n\n#from gensim.summarization import summarize\ndef final_summary_text(text,nos):\n import re\n import urllib.request\n import requests\n from bs4 import BeautifulSoup\n from nltk.corpus import stopwords\n from string import punctuation\n import nltk\n nltk.download('punkt')\n import sys\n\n\n stop_words=stopwords.words('english')\n punctuation=punctuation + '\\n'\n\n def get_semiclean_content(text):\n #obj=requests.get(url)\n #text=obj.text\n #soup=BeautifulSoup(text,features=\"lxml\")\n #paras=soup.find_all(\"p\")\n #newtext=' '\n #for para in paras:\n #newtext+=para.text\n text=nltk.sent_tokenize(text)\n semi_cleaned=semi_cleaned_text(text)\n return semi_cleaned\n\n\n def semi_cleaned_text(doc):\n # Removing Square Brackets and Extra Spaces\n newdoc=' '\n for sen in doc:\n newdoc+=sen\n newdoc= re.sub(r'\\[[0-9]*\\]', ' ', newdoc)\n newdoc= re.sub(r'\\s+', ' ', newdoc)\n return newdoc\n \n\n\n def cleaned_text(doc):\n newdoc=' '\n for sen in doc:\n newdoc+=sen\n newdoc = re.sub('[^a-zA-Z]',\" \",newdoc) # only alphabets\n newdoc= re.sub('\\s+',\" \",newdoc)\n return newdoc\n\n\n def get_clean_content(text):\n #obj=requests.get(url)\n #text=obj.text\n #soup=BeautifulSoup(text,features=\"lxml\")\n #paras=soup.find_all(\"p\")\n #newtext=' '\n #for para in paras:\n #newtext+=para.text\n text=nltk.sent_tokenize(text)\n cleaned=cleaned_text(text)\n return cleaned\n\n\n final=get_clean_content(text)\n semi_final=get_semiclean_content(text)\n sentences=nltk.sent_tokenize(final)\n\n\n word_frequencies= {}\n\n for word in nltk.word_tokenize(final):\n if word.lower() not in stop_words:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n \n\n mx=max(word_frequencies.values())\n\n for word in word_frequencies.keys():\n word_frequencies[word]=word_frequencies[word]/mx\n\n sentences=nltk.sent_tokenize(semi_final)\n total=len(sentences)\n\n scores= {}\n for sen in sentences:\n for word in nltk.word_tokenize(sen.lower()):\n if word in word_frequencies.keys():\n if len(sen.split(' ')) < 50:\n if sen not in scores:\n scores[sen]=word_frequencies[word]\n else:\n scores[sen]+=word_frequencies[word]\n\n import heapq\n number=nos\n summary_sen=heapq.nlargest(number,scores,key=scores.get)\n summary= ' '.join(summary_sen)\n return summary\n\n", "sub_path": "neww.py", "file_name": "neww.py", "file_ext": "py", "file_size_in_byte": 3017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "nltk.download", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 21, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 22, "usage_type": "name"}, {"api_name": "nltk.sent_tokenize", "line_number": 32, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 43, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 52, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 53, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 65, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 72, "usage_type": 
"call"}, {"api_name": "nltk.word_tokenize", "line_number": 77, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 90, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 95, "usage_type": "call"}, {"api_name": "heapq.nlargest", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "519903451", "text": "import model\nimport measure\nimport evaluate\nimport locate\nimport numpy as np\nimport itertools\nimport abc\nimport matplotlib.pyplot as plt\n\n\nclass Refine:\n def __init__(self, image, positions, shapes, shape_model):\n self.image = image\n self.positions = positions\n self.shapes = shapes\n self.model = shape_model\n self.blank_region = self._get_blank_region()\n self.radius = np.array([self.model.get_radius(s, min) for s in shapes])\n\n def _get_occupied_index(self, position, radius):\n lower = [max(0, np.ceil(dim - radius)) for dim in position] # [x0, y0, z0]\n upper = [min(self.image.shape[i], np.ceil(dim + radius)) for i, dim in enumerate(position)] # [x1, y1, z1]\n box = []\n for dim in range(self.image.ndim):\n pixel_1d = np.arange(lower[dim], upper[dim])\n box.append(pixel_1d.astype(int))\n\n box = list(itertools.product(*box))\n for index in box:\n index = np.array(index)\n dist = np.linalg.norm(index - position)\n if dist <= radius:\n yield np.array(index).astype(int)\n\n def _get_blank_region(self):\n region = np.ones(self.image.shape, dtype=bool)\n occupied_indices = []\n r_min = min([self.model.get_radius(s, min) for s in self.shapes])\n for pos, shape in zip(self.positions, self.shapes): # pos is still a collection of points with the same shape\n radius = self.model.get_radius(shape, method=min) # use smaller radius to test overlap\n radius = radius + r_min\n for p in pos:\n indices = list(self._get_occupied_index(p, radius))\n occupied_indices += indices\n occupied_indices = np.array(occupied_indices).T\n region[tuple(occupied_indices)] = False\n return region\n\n def _increase_blank_region(self, position, shape):\n radius = self.model.get_radius(shape, method=min)\n occupied_indices = np.array(list(self._get_occupied_index(position, radius))).T\n self.blank_region[tuple(occupied_indices)] = False\n\n def _get_shape_from_position(self, position):\n x, y, z = position.astype(int)\n intensity = self.image[x, y, z]\n intensity_table = np.array([shape['intensity_max'] for shape in self.shapes])\n closet = np.argmin(np.abs(intensity_table - intensity))\n return self.shapes[closet]\n\n def get_residual(self, intensity_threshold=0):\n \"\"\"\n for every pixel inside the \"blank region\", try to place a particle there\n brighter ones go first\n :param intensity_threshold: a percentage number, pixels below this intensity would not be considered\n \"\"\"\n image_remaining = self.image * self.blank_region\n return image_remaining\n\n def fit_shape(self):\n pass\n\n\nclass IsiRefine(Refine):\n def __init__(self, image, positions, shapes, shape_model, error_tolerance, intensity_threshold=0):\n \"\"\"\n This refiner is heavily coupled with the model\n :param positions: a list of positions of particles, [(number, dimension), ...]\n :param shapes: a list of shapes, [{shape_1, ...}, {shape_2, ...}, ...]\n :param expand: the expansion of the radius, this has the same meaning as the one in locator\n \"\"\"\n Refine.__init__(self, image, positions, shapes, shape_model)\n self.error_tolerance = error_tolerance\n self.intensity_threshold = intensity_threshold\n\n def _move_to_better(self, position, shape):\n move_1d = [-2, -1, 0, 1, 2]\n move_3d = 
np.array(list(itertools.product(move_1d, move_1d, move_1d)))\n candidates = np.expand_dims(position, 0) + move_3d # (number, dimension)\n in_image = []\n for dim in range(self.image.ndim):\n cand_1d = candidates.T[dim]\n cand_1d[cand_1d < 0] = 0\n cand_1d[cand_1d >= self.image.shape[dim]] = self.image.shape[dim] - 1\n in_image.append(cand_1d)\n candidates = np.array(in_image).T # (number, dimension)\n shapes = [self._get_shape_from_position(p) for p in candidates]\n max_shape = shapes[np.argmax([s['sigma_z'] for s in shapes])]\n radius_z = self.model.get_radius(max_shape, method=max) # larger size, used to draw sub-image\n sub_image_box = list(measure.get_sub_image_box_for_cluster(candidates, radius_z, self.image.shape))\n errors = [evaluate.get_pixel_err(self.image, p, s, sub_image_box) for p, s in zip(candidates, shapes)]\n new_position = candidates[np.argmin(errors)]\n return new_position\n\n def _move_to_brighter(self, position, shape):\n move_1d = [-1, 0, 1]\n move_3d = np.array(list(itertools.product(move_1d, move_1d, move_1d)))\n candidates = np.expand_dims(position, 0) + move_3d # (number, dimension)\n in_image = []\n for dim in range(self.image.ndim):\n cand_1d = candidates.T[dim]\n cand_1d[cand_1d < 0] = 0\n cand_1d[cand_1d >= self.image.shape[dim]] = self.image.shape[dim] - 1\n in_image.append(cand_1d)\n candidates = np.array(in_image).T # (number, dimension)\n\n radius_z = self.model.get_radius(shape, method=max) # larger size, used to draw sub-image\n sub_image_box = list(measure.get_sub_image_box_for_cluster(candidates, radius_z, self.image.shape))\n intensities = [evaluate.get_pixel_brightness(self.image, p, shape, sub_image_box) for p in candidates]\n new_position = candidates[np.argmax(intensities)]\n return new_position\n\n def _is_shape_not_similar(self, position, shape):\n radius = self.model.get_radius(shape, max)\n sub_image_box = list(measure.get_sub_image_box(position, radius, self.image.shape))\n error = evaluate.get_pixel_err(self.image, position, shape, sub_image_box)\n return error > self.error_tolerance\n\n def _is_shape_too_dim(self, position, shape):\n radius = self.model.get_radius(shape, max)\n sub_image_box = list(measure.get_sub_image_box(position, radius, self.image.shape))\n brightness = evaluate.get_pixel_brightness(self.image, position, shape, sub_image_box)\n return brightness < self.intensity_threshold\n\n def update_positions(self, add_number=10, move_step=10, method='better'):\n \"\"\"\n The positions and shapes would be updated\n This is a modified version of tracking algorithm in Claudia's thesis\n The original description is available upon request, please contact\n yushi.yang@bristol.ac.uk or Paddy.Royall@bristol.ac.uk\n :param add_number: the number to add for each refinement\n :param move_step: the number of the deterministic moving\n :param method: the way to update a single particle,\n better - reduce the difference between simulation and image\n brighter - increase the normalised intensity inside a particle\n normalised intensity = (I * simulation) / simulation_size\n \"\"\"\n\n # only add to the place where there is no particle\n available_positions = np.array(np.where(self.blank_region > 0)).T\n new_indices = np.random.permutation(len(available_positions))[:add_number]\n new_positions = available_positions[new_indices]\n if method == 'better':\n move_func = self._move_to_better\n eval_func = self._is_shape_not_similar\n elif method == 'brighter':\n move_func = self._move_to_brighter\n eval_func = self._is_shape_too_dim\n else:\n raise 
RuntimeError(f\"{method} is not implemented for position update\")\n\n for p in new_positions:\n shape = self._get_shape_from_position(p)\n for _ in range(move_step):\n p = move_func(p, shape)\n shape = self._get_shape_from_position(p)\n if eval_func(p, shape):\n continue\n is_moving_to_existing_positions = False\n for p2 in np.concatenate(self.positions):\n if np.allclose(p, p2):\n is_moving_to_existing_positions = True\n continue\n if not is_moving_to_existing_positions:\n radius = self.model.get_radius(shape, min)\n minimum_distances = radius + self.radius\n after_removal = locate.remove_overlap_multiscale([p], self.positions, minimum_distances)\n if len(after_removal) > 0:\n self.positions += [np.expand_dims(p, 0)]\n self.shapes += [shape]\n self.radius = np.array([self.model.get_radius(s, min) for s in self.shapes])\n self._increase_blank_region(p, shape)\n", "sub_path": "src/refine.py", "file_name": "refine.py", "file_ext": "py", "file_size_in_byte": 8784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 98, "usage_type": "call"}, {"api_name": "measure.get_sub_image_box_for_cluster", "line_number": 100, "usage_type": "call"}, {"api_name": "evaluate.get_pixel_err", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "measure.get_sub_image_box_for_cluster", "line_number": 118, "usage_type": "call"}, {"api_name": "evaluate.get_pixel_brightness", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 120, "usage_type": "call"}, {"api_name": "measure.get_sub_image_box", "line_number": 125, "usage_type": "call"}, {"api_name": "evaluate.get_pixel_err", "line_number": 126, "usage_type": "call"}, {"api_name": "measure.get_sub_image_box", "line_number": 131, 
"usage_type": "call"}, {"api_name": "evaluate.get_pixel_brightness", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 171, "usage_type": "call"}, {"api_name": "locate.remove_overlap_multiscale", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "337509114", "text": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass IdentityUpdateData(Model):\n \"\"\"IdentityUpdateData.\n\n :param id:\n :type id: str\n :param index:\n :type index: int\n :param updated:\n :type updated: bool\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'index': {'key': 'index', 'type': 'int'},\n 'updated': {'key': 'updated', 'type': 'bool'}\n }\n\n def __init__(self, id=None, index=None, updated=None):\n super(IdentityUpdateData, self).__init__()\n self.id = id\n self.index = index\n self.updated = updated\n", "sub_path": "vsts/vsts/identity/v4_1/models/identity_update_data.py", "file_name": "identity_update_data.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "msrest.serialization.Model", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "635589863", "text": "# encoding: utf-8\n\"\"\"\nTests Views of the application\n\"\"\"\n\nimport unittest\nimport uuid\nfrom flask import url_for\nfrom biblib.models import User, Library, Permissions, MutableDict\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom biblib.views import UserView, LibraryView, DocumentView, PermissionView, \\\n BaseView, TransferView, ClassicView\nfrom biblib.views import DEFAULT_LIBRARY_DESCRIPTION\nfrom biblib.tests.stubdata.stub_data import UserShop, LibraryShop, fake_biblist\nfrom biblib.utils import get_item\nfrom biblib.biblib_exceptions import BackendIntegrityError, PermissionDeniedError\nfrom biblib.tests.base import TestCaseDatabase, MockEmailService, \\\n MockSolrBigqueryService\n\n\nclass TestLibraryViews(TestCaseDatabase):\n \"\"\"\n Base class to test the Library view for GET\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of the class\n\n :param args: to pass on to the super class\n :param kwargs: to pass on to the super class\n\n :return: no return\n \"\"\"\n\n super(TestLibraryViews, self).__init__(*args, **kwargs)\n self.user_view = UserView\n self.library_view = LibraryView\n\n self.stub_user = 
self.stub_user_1 = UserShop()\n self.stub_user_2 = UserShop()\n\n self.stub_library = LibraryShop()\n\n @unittest.skip('')\n def test_library_pagination_default(self):\n \"\"\"\n Test that users that do not request pagination, do not have any issues\n \"\"\"\n\n # Ensure a user exists\n user = User(absolute_uid=self.stub_user.absolute_uid)\n with self.app.session_scope() as session:\n session.add(user)\n session.commit()\n\n bibcodes = {i: {} for i in fake_biblist(40)}\n\n # Ensure a library exists\n library = Library(name='MyLibrary',\n description='My library',\n public=True,\n bibcode=bibcodes)\n\n # Give the user and library permissions\n permission = Permissions(owner=True,\n read=True,\n write=True)\n\n # Commit the stub data\n user.permissions.append(permission)\n library.permissions.append(permission)\n session.add_all([library, permission, user])\n session.commit()\n\n # Test default pagination\n lib_id = LibraryView.helper_uuid_to_slug(library.id)\n\n url = url_for('libraryview', library=lib_id)\n\n with MockSolrBigqueryService(number_of_bibcodes=20) as BQ, \\\n MockEmailService(self.stub_user, end_type='uid') as EP:\n r = self.client.get(\n url,\n headers=self.stub_user.headers()\n )\n\n self.assertStatus(r, 200)\n self.assertAlmostEqual(\n self.stub_library.bibcode.keys(),\n r.json['documents']\n )\n\n @unittest.skip('')\n def test_library_pagination_user_supplied(self):\n \"\"\"\n \"\"\"\n\n # Ensure a user exists\n user = User(absolute_uid=self.stub_user.absolute_uid)\n with self.app.session_scope() as session:\n session.add(user)\n session.commit()\n\n # Ensure a library exists\n library = Library(name='MyLibrary',\n description='My library',\n public=True,\n bibcode=self.stub_library.bibcode)\n\n # Give the user and library permissions\n permission = Permissions(owner=True,\n read=True,\n write=True)\n\n # Commit the stub data\n user.permissions.append(permission)\n library.permissions.append(permission)\n session.add_all([library, permission, user])\n session.commit()\n\n lib_id = LibraryView.helper_uuid_to_slug(library.id)\n\n #test with user supplied start, rows, sort, and fields\n url = url_for('libraryview', library=lib_id, params={\n 'start' : 100,\n 'rows' : 100,\n 'sort' : 'citation_count desc',\n 'fl' : 'bibcode,title,abstract'\n })\n\n\n r = self.client.get(url, headers={'X-Adsws-Uid': self.stub_user.absolute_uid})\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n", "sub_path": "biblib/tests/unit_tests/test_pagination.py", "file_name": "test_pagination.py", "file_ext": "py", "file_size_in_byte": 4555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "biblib.tests.base.TestCaseDatabase", "line_number": 22, "usage_type": "name"}, {"api_name": "biblib.views.UserView", "line_number": 38, "usage_type": "name"}, {"api_name": "biblib.views.LibraryView", "line_number": 39, "usage_type": "name"}, {"api_name": "biblib.tests.stubdata.stub_data.UserShop", "line_number": 41, "usage_type": "call"}, {"api_name": "biblib.tests.stubdata.stub_data.UserShop", "line_number": 42, "usage_type": "call"}, {"api_name": "biblib.tests.stubdata.stub_data.LibraryShop", "line_number": 44, "usage_type": "call"}, {"api_name": "biblib.models.User", "line_number": 53, "usage_type": "call"}, {"api_name": "biblib.tests.stubdata.stub_data.fake_biblist", "line_number": 58, "usage_type": "call"}, {"api_name": "biblib.models.Library", "line_number": 61, "usage_type": "call"}, {"api_name": "biblib.models.Permissions", 
"line_number": 67, "usage_type": "call"}, {"api_name": "biblib.views.LibraryView.helper_uuid_to_slug", "line_number": 78, "usage_type": "call"}, {"api_name": "biblib.views.LibraryView", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 80, "usage_type": "call"}, {"api_name": "biblib.tests.base.MockSolrBigqueryService", "line_number": 82, "usage_type": "call"}, {"api_name": "biblib.tests.base.MockEmailService", "line_number": 83, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 46, "usage_type": "call"}, {"api_name": "biblib.models.User", "line_number": 101, "usage_type": "call"}, {"api_name": "biblib.models.Library", "line_number": 107, "usage_type": "call"}, {"api_name": "biblib.models.Permissions", "line_number": 113, "usage_type": "call"}, {"api_name": "biblib.views.LibraryView.helper_uuid_to_slug", "line_number": 123, "usage_type": "call"}, {"api_name": "biblib.views.LibraryView", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 126, "usage_type": "call"}, {"api_name": "unittest.skip", "line_number": 95, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "281715163", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 22 11:33:18 2021\r\n@author: darsh\r\n\"\"\"\r\nimport openslide\r\nimport numpy as np\r\nfrom getMaskFromXml import getMaskFromXml\r\nfrom skimage.transform import rescale,resize \r\nimport os\r\nfrom skimage.measure import label,regionprops\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport FNs\r\nimport lxml.etree as ET\r\nfrom getPASnuclei import getPASnuclei\r\nimport imageio\r\nfrom pvd_calculation import pvd_calculation #new\r\nimport csv #new\r\n\r\ndef create_podocyte_Outxml_CNN(svsfile,xmlfile,crop_size,resdir,PAS_nuc_thre,size_thre,gauss_filt_size,watershed_dist_thre,disc_size,resol,tissue_thickness,csv_file_name):\r\n \r\n print(\"Reading PAS file...\")\r\n Imagename = os.path.basename(svsfile).split('.')[0]\r\n sourcePAS = openslide.open_slide(svsfile)\r\n print(\"Opening WSIs in mid resolution...\")\r\n PAS = np.array(sourcePAS.read_region((0,0),1,sourcePAS.level_dimensions[1]),dtype = \"uint8\")\r\n PAS_mpp = (float(sourcePAS.properties[openslide.PROPERTY_NAME_MPP_X])+float(sourcePAS.properties[openslide.PROPERTY_NAME_MPP_Y]))/2#new\r\n PAS = PAS[:,:,0:3] \r\n \r\n '''XML annotation to mask'''\r\n '''======================''' \r\n print(\"Converting Glom xml to mask...\") \r\n PASmask = rescale(getMaskFromXml(sourcePAS,xmlfile), 1, anti_aliasing=False)*1\r\n highres_w = crop_size/4\r\n\r\n if resol == 0:\r\n TPI_F = np.zeros((sourcePAS.level_dimensions[0][1],sourcePAS.level_dimensions[0][0]))\r\n else:\r\n TPI_F = np.zeros((sourcePAS.level_dimensions[1][1],sourcePAS.level_dimensions[1][0]))\r\n print(TPI_F.shape)\r\n \r\n flip_flag = 0\r\n if(PASmask.shape[0] ==sourcePAS.level_dimensions[1][1]):\r\n print(\"PAS and Mask mid resolution is flipped...\")\r\n flip_flag = 1\r\n\r\n highres_w = crop_size/4\r\n all_glom_pvds = []#new\r\n\r\n countPatch = 0\r\n c = 0\r\n for region in regionprops(label(PASmask)):\r\n c +=1\r\n minr, minc, maxr, maxc = region.bbox\r\n \r\n ptx = (minr+maxr)/2\r\n pty = (minc+maxc)/2\r\n \r\n centroids = [pty,ptx] \r\n startx = int(max((centroids[0]-(highres_w/2))*4,0))\r\n starty = int(max((centroids[1]-(highres_w/2))*4,0))\r\n endx = int(highres_w*4)\r\n endy = int(highres_w*4)\r\n \r\n crop_imgPAS = 
np.array(sourcePAS.read_region(((startx),(starty)),0,((endx),(endy))),dtype = \"uint8\")\r\n crop_imgPAS = crop_imgPAS[:,:,0:3]\r\n \r\n \r\n Glommask_1 = PASmask[int(ptx-(highres_w/2)):int(ptx+(highres_w/2)),int(pty-(highres_w/2)):int(pty+(highres_w/2))]\r\n if crop_imgPAS[:,:,0].shape != (Glommask_1.shape[0]*4,Glommask_1.shape[1]*4):\r\n continue\r\n\r\n Glommask2 = resize(Glommask_1,(highres_w*4,highres_w*4),anti_aliasing=True)*1\r\n Glommask2 = cv2.threshold((Glommask2), 0.1, 255, cv2.THRESH_BINARY)[1] \r\n \r\n print(\"Analyzing patches...\")\r\n '''========================================='''\r\n \r\n if crop_imgPAS.shape == (highres_w*4,highres_w*4,3):\r\n countPatch += 1\r\n filename = Imagename +'_'+str(countPatch) \r\n print(filename) \r\n \r\n cnn_out_name = 'b'+\"'\"+filename+\".png'\"+\".png\"\r\n \r\n fil_name = resdir+ cnn_out_name \r\n predicted_im = imageio.imread(fil_name)\r\n# predicted_im = resize(predicted_im, (crop_imgPAS[:,:,0].shape),anti_aliasing=True)\r\n \r\n '''Segment pix2pix detected podocyte nuclei'''\r\n '''=========================================''' \r\n\r\n predicted_mask = predicted_im==2*1\r\n\r\n \r\n '''Segment PAS nuclei'''\r\n '''========================================='''\r\n AllPAS = (getPASnuclei(crop_imgPAS[:,:,0:3],Glommask2,PAS_nuc_thre,size_thre,gauss_filt_size,watershed_dist_thre,disc_size))*1\r\n\r\n LabelPAS = label(AllPAS)\r\n FakePodPASmask = np.zeros(AllPAS.shape) \r\n count2 = 1\r\n for reg in LabelPAS:\r\n eachIm = (LabelPAS == count2)*1\r\n eachImprops = regionprops(eachIm)\r\n N_area = [eprop.area for eprop in eachImprops]\r\n count2+=1\r\n if not N_area:\r\n continue\r\n else:\r\n if (np.sum(eachIm*predicted_mask*1)>int(0.7*float(N_area[0]))):\r\n FakePodPASmask = FakePodPASmask + eachIm\r\n \r\n del predicted_im\r\n \r\n '''PVD calcultation'''#new\r\n '''================'''#new\r\n \r\n all_vals = pvd_calculation(FakePodPASmask,Glommask2,PAS_mpp,tissue_thickness)#new\r\n all_glom_pvds.append([countPatch,all_vals[0],all_vals[1],all_vals[2],all_vals[3],all_vals[4],\r\n all_vals[5], all_vals[6], all_vals[7]])#new\r\n \r\n\r\n if resol ==0:\r\n if Imagename[0:3]=='NTN':\r\n try:\r\n FakePodPASmask = np.fliplr(np.rot90(FakePodPASmask,3))\r\n TPI_F[int(pty-(highres_w/2))*4:int(pty+(highres_w/2))*4,int(ptx-(highres_w/2))*4:int(ptx+(highres_w/2))*4] = FakePodPASmask\r\n except:\r\n continue\r\n else:\r\n try:\r\n TPI_F[int(ptx-(highres_w/2))*4:int(ptx+(highres_w/2))*4,int(pty-(highres_w/2))*4:int(pty+(highres_w/2))*4] = FakePodPASmask\r\n except:\r\n continue\r\n else:\r\n nuc_mask = rescale(FakePodPASmask, 0.25, anti_aliasing=False,preserve_range=True)\r\n nuc_mask = cv2.threshold(nuc_mask, 0.01, 255, cv2.THRESH_BINARY)[1]\r\n if Imagename[0:3]=='NTN':\r\n try:\r\n nuc_mask = np.fliplr(np.rot90(nuc_mask,3))\r\n TPI_F[int(pty-(highres_w/2)):int(pty+(highres_w/2)),int(ptx-(highres_w/2)):int(ptx+(highres_w/2))] = nuc_mask\r\n except:\r\n continue\r\n else:\r\n try:\r\n TPI_F[int(ptx-(highres_w/2)):int(ptx+(highres_w/2)),int(pty-(highres_w/2)):int(pty+(highres_w/2))] = nuc_mask\r\n except:\r\n continue\r\n# \r\n agpvd_matrix = np.matrix(all_glom_pvds) \r\n final_pvd = [np.float(np.max(agpvd_matrix[:,0])),tissue_thickness, np.float(sum(agpvd_matrix[:,2])),\r\n np.float(sum(agpvd_matrix[:,3])),np.float(np.mean(agpvd_matrix[:,4])),np.float(np.mean(agpvd_matrix[:,5])),\r\n np.float(np.mean(agpvd_matrix[:,6])),np.float(np.mean(agpvd_matrix[:,7])),\r\n 
np.float(sum(agpvd_matrix[:,3]))*np.float(np.mean(agpvd_matrix[:,7]))/(np.float(sum(agpvd_matrix[:,2]))*tissue_thickness)*10000] \r\n myFile = open(csv_file_name, 'w') \r\n with myFile:\r\n writer = csv.writer(myFile)\r\n writer.writerow([\"Sr.no\",\"Tissue thickness (T)\", \"Glom. area (sq. microns)\",\r\n \"Num. of podocytes\", \"Apparent mean nuclear caliper diam.(microns) (d)\",\r\n \"shape factor (k)\",\"True mean nuclear caliper diam.(microns)(D)\",\"Correction Factor (CF)\",\r\n \"Pod. vol. density (n/10^4 cubic microns)\"])\r\n writer.writerows(all_glom_pvds) \r\n writer.writerow([\"Final result:\",\"-\", \"-\",\"-\", \"-\",\"-\",\"-\",\"-\",\"-\",])\r\n writer.writerow([ \"Total glom profiles\",\"Tissue thickness (T)\", \"Total Glom. area (sq. microns)\",\r\n \"Total num. of podocytes\", \"Avg. Apparent mean nuclear caliper diam.(microns)(d)\",\r\n \"shape factor (k)\",\"Avg. true mean nuclear caliper diam.(microns)(D)\",\"Avg. Correction Factor (CF)\",\r\n \"Pod. vol. density (n/10^4 cubic microns)\"])\r\n writer.writerow(map(lambda y: y, final_pvd)) \r\n \r\n \r\n\r\n print('Generating pix2pix output xml...')\r\n '''========================================='''\r\n if resol == 0 and Imagename[0:3]=='NTN': \r\n TP2 = cv2.threshold((TPI_F), 0.5, 255, cv2.THRESH_BINARY)[1] \r\n TP2 = np.transpose(TP2)\r\n\r\n elif resol == 0: \r\n TP2 = cv2.threshold((TPI_F), 0.5, 255, cv2.THRESH_BINARY)[1] \r\n elif resol == 1 and Imagename[0:3]=='NTN': \r\n TP2 = cv2.threshold((TPI_F), 0.5, 255, cv2.THRESH_BINARY)[1] \r\n TP2 = np.transpose(TP2)\r\n elif resol == 1:\r\n TP2 = cv2.threshold((TPI_F), 0.5, 255, cv2.THRESH_BINARY)[1]\r\n \r\n offset={'X': 0,'Y': 0} \r\n maskPoints,_ = cv2.findContours(np.array((np.uint8(TP2))), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n pointsList = []\r\n for j in range(np.shape(maskPoints)[0]):\r\n pointList = []\r\n for i in range(np.shape(maskPoints[j])[0]):\r\n point = {'X': (maskPoints[j][i][0][0]) + offset['X'], 'Y': (maskPoints[j][i][0][1]) + offset['Y']}\r\n pointList.append(point)\r\n pointsList.append(pointList)\r\n Annotations = ET.Element('Annotations', attrib={'MicronsPerPixel': '0.136031'})\r\n col1 = str(65280)\r\n Annotations = FNs.xml_add_annotation(Annotations=Annotations,annotationID=1,LC = col1)\r\n \r\n for i in range(np.shape(pointsList)[0]):\r\n pointList = pointsList[i]\r\n Annotations = FNs.xml_add_region(Annotations=Annotations, pointList=pointList) \r\n \r\n \r\n\r\n return TP2\r\n", "sub_path": "histomicstk/PodoSighter_cnn_folder/create_podocyte_Outxml_CNN.py", "file_name": "create_podocyte_Outxml_CNN.py", "file_ext": "py", "file_size_in_byte": 9531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.basename", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "openslide.open_slide", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "openslide.PROPERTY_NAME_MPP_X", "line_number": 28, "usage_type": "attribute"}, {"api_name": "openslide.PROPERTY_NAME_MPP_Y", "line_number": 28, "usage_type": "attribute"}, {"api_name": "skimage.transform.rescale", "line_number": 34, "usage_type": "call"}, {"api_name": "getMaskFromXml.getMaskFromXml", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": 
"call"}, {"api_name": "skimage.measure.regionprops", "line_number": 53, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 75, "usage_type": "attribute"}, {"api_name": "imageio.imread", "line_number": 88, "usage_type": "call"}, {"api_name": "getPASnuclei.getPASnuclei", "line_number": 99, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 112, "usage_type": "call"}, {"api_name": "pvd_calculation.pvd_calculation", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 128, "usage_type": "call"}, {"api_name": "skimage.transform.rescale", "line_number": 138, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 156, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 177, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 183, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 183, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 186, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 186, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 189, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 189, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 193, 
"usage_type": "call"}, {"api_name": "lxml.etree.Element", "line_number": 197, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 197, "usage_type": "name"}, {"api_name": "FNs.xml_add_annotation", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 201, "usage_type": "call"}, {"api_name": "FNs.xml_add_region", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "257061111", "text": "import requests\nfrom flask import make_response\nfrom flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom pymongo import MongoClient\nimport json\n\n# Init app\napp = Flask(__name__)\n\n# app.config['MONGO_DBNAME'] = 'databasename'\n# app.config['MONGO_URI'] = 'mongodb://127.0.0.1:27017'\n\n# mongo = PyMongo(app)\nmongo = MongoClient(host='127.0.0.1', port=27017)\ndb = mongo[\"嘿嘿\"]\n\n\n@app.route('/api/task_id=&pagesize=', methods=['GET'])\ndef get_all_frameworks(task_id, size):\n sheet = db[\"嘿嘿\"]\n output = []\n for q in sheet.find({},{'_id':0}).skip((task_id-1) * 10).limit(size):\n print(q)\n output.append(q)\n return json.dumps({'result' : output}, ensure_ascii=False)\n\n@app.route('/api/', methods=['GET'])\ndef get_one_framework(name):\n sheet = db[\"detail_data\"]\n q = sheet.find_one({'name' : name})\n if q:\n output = {'name' : q['name']}\n else:\n output = 'No results found'\n return json.dumps({'result' : output}, ensure_ascii=False)\n\n@app.route('/api/jscode/',methods=['GET']) # 获取openid\ndef get_user_info(js_code):\n req_params = {\n \"appid\" : 'appid',\n \"secret\" : 'secret',\n \"js_code\": js_code,\n \"grant_type\": 'authorization_code'\n }\n print(req_params)\n result = requests.get('https://api.weixin.qq.com/sns/jscode2session', params=req_params, timeout=15, verify=False)\n print(result.text)\n return jsonify({'result' : json.loads(result.text)})\n\n@app.route('/api/post/', methods=['POST'])\ndef add_framework():\n sheet = db[\"users\"]\n json_data = request.json\n print(json_data)\n if sheet.update({\"openid\":json_data[\"openid\"]},{\"$set\": json_data},True):\n return jsonify({\"Msg\": \"Success\",\"data\":json_data})\n else:\n return jsonify({\"Msg\": \"数据库操作失败!!\"})\n\n\n\n@app.route('/api/', methods=['PUT'])\ndef update_framework():\n sheet = db[\"detail_data\"]\n name = request.json['name']\n print(request.json)\n if sheet.update({'name': name}, {'$set': request.json}, True):\n print(\"TRUE\") # {'ok': 1, 'nModified': 0, 'n': 1, 'updatedExisting': True}\n\n new_framework = sheet.find_one({'name' : name})\n output = {'name' : new_framework['name'], 'language' : new_framework['language']}\n return jsonify({'result' : output})\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(json.dumps({'error': '服务器出错,而我们没有任何的棒法!'}, ensure_ascii=False), 404)\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\",debug=True)\n\n\n\n\n\n\n\n\n# 获取openid\n# import requests\n# def get_user_info(js_code):\n# headers = {\"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1 wechatdevtools/1.02.1906272 MicroMessenger/7.0.4 Language/zh_CN webview/\"}\n#\n# req_params = {\n# \"appid\" : 'appid',\n# \"secret\" : 'secret',\n# \t\t \"js_code\": js_code,\n# \t\t \"grant_type\": 'authorization_code'\n# \t}\n# req_resutl = requests.get('https://api.weixin.qq.com/sns/jscode2session', headers=headers, params=req_params, timeout=3, verify=False)\n# return req_resutl.text\n\n# result = get_user_info('js_code')\n# 
print(result)\n\n\n\n\n\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 3283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 15, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "15639940", "text": "from socket import *\nimport select\nimport sys\nimport argparse\nimport select\n\n\n\nparser = argparse.ArgumentParser(description='ftp client.')\nparser.add_argument('--ip_address', required=False, type=str, default='localhost' ,help='La direccion IP del servidor')\nparser.add_argument('--port',required=False, default= 12000, type=int, help='El puerto donde escucha el servidor')\nargs = parser.parse_args()\nserverName = args.ip_address \nserverPort = args.port\n\n\nclientSocket = socket(AF_INET, SOCK_STREAM) #Crea el socket del cliente\nserver_address=(serverName, serverPort)\n\n\n\nclientSocket.connect((server_address)) # Conexion entre el cliente y el servidor \n\n\ndevices=[clientSocket, sys.stdin]\n\nwhile 1:\n\n\n\tclientSocket.send('list'.encode())\n\tmessageserver= (clientSocket.recv(4096)).decode()\n\tnombres=messageserver.split(',')\n\tprint('Estimado Usuario: Para acceder a los archivos del servidor debe usar algunos de estos comandos: list= LIstado de archivos, get nombredearchivo:solicitud y descarga de archivo, quit: Salir')\n\n\tdevice, output, error= select.select(devices,[],[],60)\n\n\tif not device:\n\t\t\t\n\t\t\tclientSocket.send('quit'.encode())\n\n\t\t\tprint(\"Se acabo el tiempo\")\n\t\t\tbreak\n\n\tif sys.stdin in device :\n\t\tmessageclient= sys.stdin.readline().rstrip('\\n')# el mensaje se lee desde el teclado(rstrip elimina el salto de linea)\n\t\t\n\t\t\n\n\t\t\n\n\t\tif messageclient=='list':\n\t\t\t\n\t\t\t\n\t\t\tfor n in nombres:\n\t\t\t\tprint (n)\n\n\t\t\t\n\t\telif messageclient[0:4]== 'get ': #Si las primeras 4 letras de messageclient es 'get ' el usuario esta pidiendo un archivo\n\t\t\tnamefile=messageclient[4:]\n\t\t\tclientSocket.send(namefile.encode())\n\t\t\tif namefile in nombres:\n\t\t\t\twith open(namefile, 'wb') as archivo: # wb: abrir para 
escritura en binario\n\t\t\t\t\tclientSocket.settimeout(3)\n\t\t\t\t\twhile 1:\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint(\"Recibiendo Archivo...\")\n\t\t\t\t\t\t\n\t\t\t\t\t\ttry:\n\n\t\t\t\t\t\t\tmessageserver = clientSocket.recv(4096) #Recibe un paquete hasta 4096 bytes\n\t\t\t\t\t\t\tarchivo.write(messageserver)\n\n\t\t\t\t\t\texcept timeout:\n\t\t\t\t\t\t\tprint('Archivo Recibido')\n\t\t\t\t\t\t\tbreak\n\n\n\n\t\t\telse:\n\t\t\t\tprint(\"Archivo inexistente\")\n\t\t\t\n\t\t\t\n\t\telif messageclient== 'quit':\n\t\t\tclientSocket.send(messageclient.encode())\n\t\t\tbreak\n\n\t\telse:\n\t\t\t\n\t\t\tprint('Comando Incorrecto.Vuelva a intentarlo nuevamente')\n\n\nprint (\"Fin de la conexion\")\nclientSocket.close()", "sub_path": "Ftp/datoscliente/ftpclient.py", "file_name": "ftpclient.py", "file_ext": "py", "file_size_in_byte": 2275, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 25, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "470758922", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\ndriver = webdriver.Chrome()\ndriver.maximize_window()\n\ntext_to_search = \"cancel order\"\nexpected_text = \"Cancel Items or Orders\"\n\ndriver.get(\"https://www.amazon.com/gp/help/customer/display.html\")\n\nsearch_box = driver.find_element(By.CSS_SELECTOR, '#helpsearch')\nsearch_box.send_keys(text_to_search)\n\n\ngo_button = driver.find_element(By.XPATH, \"//*[@id='helpSearchSubmit']//input\")\ngo_button.click()\n\nfirst_result = driver.find_element(By.XPATH, \"//div[@class='cs-help-search-results']//a[@class='a-link-normal']\")\nactual_text = first_result.text\n\nassert actual_text == expected_text, f\"Expected text is {expected_text}, but got {actual_text}\"\n\ndriver.quit()\n\n", "sub_path": "amazon_help_script.py", "file_name": "amazon_help_script.py", "file_ext": "py", "file_size_in_byte": 733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 3, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 3, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 11, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 15, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 18, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "175277305", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import Permission, Group\nfrom django.contrib.auth.decorators import permission_required\n\nfrom users.models import MyUser\n\n\ndef add_user_permission(request):\n if request.method == 'GET':\n # 给姓名叫admin的用户添加修改用户名的权限\n user = 
MyUser.objects.filter(username='admin').first()\n per = Permission.objects.filter(codename='change_myuser_username').first()\n # 添加权限\n # user.user_permissions.add(per)\n # 删除权限\n user.user_permissions.remove(per)\n # 清空权限\n user.user_permissions.clear()\n\n return HttpResponse('添加用户权限成功')\n\n\n\ndef create_user(request):\n if request.method == 'GET':\n MyUser.objects.create_user(username='admin',\n password='123123')\n return HttpResponse('创建用户成功')\n\n\ndef add_group_permission(request):\n if request.method == 'GET':\n # 创建审核组,并分配编辑\n group = Group.objects.filter(name='审核组').first()\n if group:\n per_list = ['change_myuser', 'delete_myuser',\n 'change_myuser_username',\n 'change_myuser_password']\n # 获取编辑的四个权限\n perms = Permission.objects.filter(codename__in=per_list)\n for per in perms:\n # 添加组和权限之间的关系\n group.permissions.add(per)\n # 删除组和权限之间的关系\n # group.permissions.remove(per)\n return HttpResponse('添加组和权限的关系')\n else:\n Group.objects.create(name='审核组')\n return HttpResponse('审核组没有创建,请先创建')\n\n\ndef add_user_group(request):\n if request.method == 'GET':\n # 给admin用户分配审核组\n user = MyUser.objects.filter(username='admin').first()\n group = Group.objects.filter(name='审核组').first()\n # 给admin用户分配组\n user.groups.add(group)\n return HttpResponse('分配组成功')\n\n\ndef user_permission(request):\n if request.method == 'GET':\n user = MyUser.objects.filter(username='admin').first()\n # 查询user的权限\n # 1. 用户和权限的关联表中查询\n p1 = user.user_permissions.all().values('codename')\n # 2. 通过用户查询组,通过组查询权限\n p2 = user.groups.first().permissions.all().values('codename')\n # 通过用户���取组权限\n user.get_group_permissions()\n # 通过用户查询所有的权限\n user.get_all_permissions()\n return HttpResponse('查询用户对应的权限')\n\n\n@permission_required('users.change_myuser_username')\ndef index(request):\n if request.method == 'GET':\n # change_myuser_username\n # return HttpResponse('我是首页,我需要有修改用户名的权限才能访问')\n return render(request, 'index.html')", "sub_path": "qf_1805/1.django/day08/代码/day08/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3091, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "users.models.MyUser.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "users.models.MyUser", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Permission.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Permission", "line_number": 13, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 21, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects.create_user", "line_number": 27, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "users.models.MyUser", "line_number": 27, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Permission.objects.filter", 
"line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Permission.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Permission", "line_number": 41, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.create", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects.filter", "line_number": 56, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "users.models.MyUser", "line_number": 56, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 57, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "users.models.MyUser.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "users.models.MyUser", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "20962069", "text": "# flake8: noqa E731\nimport numpy as np\nimport pytest\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import OneHotEncoder\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nimport keras\nfrom alibi.api.defaults import DEFAULT_META_CFP, DEFAULT_DATA_CFP\nfrom alibi.datasets import fetch_adult\nfrom alibi.explainers import CounterFactualProto\nfrom alibi.utils.mapping import ord_to_ohe, ohe_to_ord, ord_to_num\n\n\n@pytest.fixture\ndef tf_keras_iris_model(request):\n if request.param == 'keras':\n k = keras\n elif request.param == 'tf':\n k = tf.keras\n else:\n raise ValueError('Unknown parameter')\n\n x_in = k.layers.Input(shape=(4,))\n x = k.layers.Dense(10, activation='relu')(x_in)\n x_out = k.layers.Dense(3, activation='softmax')(x)\n model = k.models.Model(inputs=x_in, outputs=x_out)\n model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n return model\n\n\n@pytest.fixture\ndef tf_keras_iris_ae(request):\n if request.param == 'keras':\n k = keras\n elif request.param == 'tf':\n k = tf.keras\n else:\n raise ValueError('Unknown parameter')\n\n # encoder\n x_in = k.layers.Input(shape=(4,))\n x = k.layers.Dense(5, activation='relu')(x_in)\n encoded = k.layers.Dense(2, activation=None)(x)\n encoder = k.models.Model(x_in, encoded)\n\n # decoder\n dec_in = k.layers.Input(shape=(2,))\n x = k.layers.Dense(5, activation='relu')(dec_in)\n decoded = k.layers.Dense(4, activation=None)(x)\n decoder = k.models.Model(dec_in, decoded)\n\n # autoencoder = encoder + decoder\n x_out = decoder(encoder(x_in))\n autoencoder = 
k.models.Model(x_in, x_out)\n autoencoder.compile(optimizer='adam', loss='mse')\n\n return autoencoder, encoder, decoder\n\n\n@pytest.fixture\ndef tf_keras_iris(tf_keras_iris_model, tf_keras_iris_ae):\n X, y = load_iris(return_X_y=True)\n X = (X - X.mean(axis=0)) / X.std(axis=0) # scale dataset\n\n idx = 145\n X_train, y_train = X[:idx, :], y[:idx]\n # y_train = to_categorical(y_train) # TODO: fine to leave as is?\n\n # set random seed\n np.random.seed(1)\n tf.set_random_seed(1)\n\n model = tf_keras_iris_model\n model.fit(X_train, y_train, batch_size=128, epochs=500, verbose=0)\n\n ae, enc, _ = tf_keras_iris_ae\n ae.fit(X_train, X_train, batch_size=32, epochs=100, verbose=0)\n\n return X_train, model, ae, enc\n\n\n@pytest.fixture\ndef tf_keras_iris_explainer(request, tf_keras_iris):\n X_train, model, ae, enc = tf_keras_iris\n\n if request.param[0]: # use k-d trees\n ae = None\n enc = None\n\n shape = (1, 4)\n cf_explainer = CounterFactualProto(model, shape, gamma=100, theta=100,\n ae_model=ae, enc_model=enc, use_kdtree=request.param[0],\n max_iterations=1000, c_init=request.param[1], c_steps=request.param[2],\n feature_range=(X_train.min(axis=0).reshape(shape),\n X_train.max(axis=0).reshape(shape)))\n yield X_train, model, cf_explainer\n\n\n@pytest.mark.parametrize('tf_keras_iris_explainer,use_kdtree,k', [\n ((False, 0., 1), False, None),\n ((False, 1., 3), False, None),\n ((False, 0., 1), False, 2),\n ((False, 1., 3), False, 2),\n ((True, 0., 1), True, None),\n ((True, 1., 3), True, None),\n ((True, 0., 1), True, 2),\n ((True, 1., 3), True, 2)\n], indirect=['tf_keras_iris_explainer'])\n@pytest.mark.parametrize('tf_keras_iris_model,tf_keras_iris_ae', [('tf', 'tf'), ('keras', 'keras')],\n indirect=True)\ndef test_tf_keras_iris_explainer(tf_keras_iris_explainer, use_kdtree, k):\n X_train, model, cf = tf_keras_iris_explainer\n\n # instance to be explained\n x = X_train[0].reshape(1, -1)\n pred_class = np.argmax(model.predict(x))\n not_pred_class = np.argmin(model.predict(x))\n\n # test fit\n cf.fit(X_train)\n if use_kdtree: # k-d trees\n assert len(cf.kdtrees) == cf.classes # each class has a k-d tree\n n_by_class = 0\n for c in range(cf.classes):\n n_by_class += cf.X_by_class[c].shape[0]\n assert n_by_class == X_train.shape[0] # all training instances are stored in the trees\n assert cf.kdtrees[pred_class].query(x, k=1)[0] == 0. # nearest distance to own class equals 0\n assert cf.score(x, not_pred_class, pred_class) == 0. 
# test score fn\n else: # encoder\n assert len(list(cf.class_proto.keys())) == cf.classes\n assert [True for _ in range(cf.classes)] == [v.shape == (1, 2) for _, v in cf.class_proto.items()]\n n_by_class = 0\n for c in range(cf.classes):\n n_by_class += cf.class_enc[c].shape[0]\n assert n_by_class == X_train.shape[0] # all training instances encoded\n\n # test explanation\n explanation = cf.explain(x, k=k)\n assert cf.id_proto != pred_class\n assert np.argmax(model.predict(explanation.cf['X'])) == explanation.cf['class']\n assert explanation.cf['grads_num'].shape == explanation.cf['grads_graph'].shape == x.shape\n assert explanation.meta.keys() == DEFAULT_META_CFP.keys()\n assert explanation.data.keys() == DEFAULT_DATA_CFP.keys()\n\n # test gradient shapes\n y = np.zeros((1, cf.classes))\n np.put(y, pred_class, 1)\n cf.predict = cf.predict.predict # make model black box\n grads = cf.get_gradients(x, y, x.shape[1:])\n assert grads.shape == x.shape\n\n\n@pytest.fixture\ndef tf_keras_adult_model(request):\n if request.param == 'keras':\n k = keras\n elif request.param == 'tf':\n k = tf.keras\n else:\n raise ValueError('Unknown parameter')\n\n x_in = k.layers.Input(shape=(57,))\n x = k.layers.Dense(60, activation='relu')(x_in)\n x = k.layers.Dense(60, activation='relu')(x)\n x_out = k.layers.Dense(2, activation='softmax')(x)\n model = k.models.Model(inputs=x_in, outputs=x_out)\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n@pytest.fixture\ndef tf_keras_adult(tf_keras_adult_model):\n # fetch data\n adult = fetch_adult()\n X = adult.data\n X_ord = np.c_[X[:, 1:8], X[:, 11], X[:, 0], X[:, 8:11]]\n y = adult.target\n\n # scale numerical features\n X_num = X_ord[:, -4:].astype(np.float32, copy=False)\n xmin, xmax = X_num.min(axis=0), X_num.max(axis=0)\n rng = (-1., 1.)\n X_num_scaled = (X_num - xmin) / (xmax - xmin) * (rng[1] - rng[0]) + rng[0]\n\n # OHE categorical features\n X_cat = X_ord[:, :-4].copy()\n ohe = OneHotEncoder()\n ohe.fit(X_cat)\n X_cat_ohe = ohe.transform(X_cat)\n\n # combine categorical and numerical data\n X_comb = np.c_[X_cat_ohe.todense(), X_num_scaled].astype(np.float32, copy=False)\n\n # split in train and test set\n idx = 30000\n X_train, y_train = X_comb[:idx, :], y[:idx]\n\n assert X_train.shape[1] == 57\n\n # set random seed\n np.random.seed(1)\n tf.set_random_seed(1)\n\n model = tf_keras_adult_model\n model.fit(X_train, to_categorical(y_train), batch_size=128, epochs=5, verbose=0)\n\n # create categorical variable dict\n cat_vars_ord = {}\n n_categories = 8\n for i in range(n_categories):\n cat_vars_ord[i] = len(np.unique(X_ord[:, i]))\n cat_vars_ohe = ord_to_ohe(X_ord, cat_vars_ord)[1]\n\n return X_train, model, cat_vars_ohe\n\n\n@pytest.fixture\ndef tf_keras_adult_explainer(request, tf_keras_adult):\n X_train, model, cat_vars_ohe = tf_keras_adult\n\n shape = (1, 57)\n cf_explainer = CounterFactualProto(model, shape, beta=.01, cat_vars=cat_vars_ohe, ohe=True,\n use_kdtree=request.param[0], max_iterations=1000,\n c_init=request.param[1], c_steps=request.param[2],\n feature_range=(-1 * np.ones((1, 12)), np.ones((1, 12))))\n yield X_train, model, cf_explainer\n\n\n@pytest.mark.parametrize('tf_keras_adult_explainer,use_kdtree,k,d_type', [\n ((False, 1., 3), False, None, 'mvdm'),\n ((True, 1., 3), True, 2, 'mvdm'),\n ((True, 1., 3), True, 2, 'abdm'),\n], indirect=['tf_keras_adult_explainer'])\n@pytest.mark.parametrize('tf_keras_adult_model', ['tf', 'keras'], indirect=True)\ndef 
test_tf_keras_adult_explainer(tf_keras_adult_explainer, use_kdtree, k, d_type):\n X_train, model, cf = tf_keras_adult_explainer\n\n # instance to be explained\n x = X_train[0].reshape(1, -1)\n pred_class = np.argmax(model.predict(x))\n not_pred_class = np.argmin(model.predict(x))\n\n # test fit\n cf.fit(X_train, d_type=d_type)\n\n # checked ragged tensor shape\n n_cat = len(list(cf.cat_vars_ord.keys()))\n max_key = max(cf.cat_vars_ord, key=cf.cat_vars_ord.get)\n max_cat = cf.cat_vars_ord[max_key]\n assert cf.d_abs_ragged.shape == (n_cat, max_cat)\n\n if use_kdtree: # k-d trees\n assert len(cf.kdtrees) == cf.classes # each class has a k-d tree\n n_by_class = 0\n for c in range(cf.classes):\n n_by_class += cf.X_by_class[c].shape[0]\n assert n_by_class == X_train.shape[0] # all training instances are stored in the trees\n\n # test explanation\n explanation = cf.explain(x, k=k)\n if use_kdtree:\n assert cf.id_proto != pred_class\n assert np.argmax(model.predict(explanation.cf['X'])) == explanation.cf['class']\n num_shape = (1, 12)\n assert explanation.cf['grads_num'].shape == explanation.cf['grads_graph'].shape == num_shape\n assert explanation.meta.keys() == DEFAULT_META_CFP.keys()\n assert explanation.data.keys() == DEFAULT_DATA_CFP.keys()\n\n # test gradient shapes\n y = np.zeros((1, cf.classes))\n np.put(y, pred_class, 1)\n cf.predict = cf.predict.predict # make model black box\n # convert instance to numerical space\n x_ord = ohe_to_ord(x, cf.cat_vars)[0]\n x_num = ord_to_num(x_ord, cf.d_abs)\n # check gradients\n grads = cf.get_gradients(x_num, y, num_shape[1:], cf.cat_vars_ord)\n assert grads.shape == num_shape\n", "sub_path": "alibi/explainers/tests/test_cfproto.py", "file_name": "test_cfproto.py", "file_ext": "py", "file_size_in_byte": 9980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 72, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 61, "usage_type": "attribute"}, {"api_name": "alibi.explainers.CounterFactualProto", "line_number": 92, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 141, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_META_CFP.keys", "line_number": 143, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_META_CFP", "line_number": 143, "usage_type": "name"}, {"api_name": "alibi.api.defaults.DEFAULT_DATA_CFP.keys", "line_number": 144, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_DATA_CFP", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.put", "line_number": 148, "usage_type": "call"}, {"api_name": 
"pytest.mark.parametrize", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 110, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 154, "usage_type": "attribute"}, {"api_name": "alibi.datasets.fetch_adult", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 181, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 212, "usage_type": "call"}, {"api_name": "alibi.utils.mapping.ord_to_ohe", "line_number": 213, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 172, "usage_type": "attribute"}, {"api_name": "alibi.explainers.CounterFactualProto", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 226, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 264, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_META_CFP.keys", "line_number": 267, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_META_CFP", "line_number": 267, "usage_type": "name"}, {"api_name": "alibi.api.defaults.DEFAULT_DATA_CFP.keys", "line_number": 268, "usage_type": "call"}, {"api_name": "alibi.api.defaults.DEFAULT_DATA_CFP", "line_number": 268, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.put", "line_number": 272, "usage_type": "call"}, {"api_name": "alibi.utils.mapping.ohe_to_ord", "line_number": 275, "usage_type": "call"}, {"api_name": "alibi.utils.mapping.ord_to_num", "line_number": 276, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 230, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 235, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 235, "usage_type": "attribute"}]} +{"seq_id": "374006593", "text": "from pyspark import SparkContext, SparkConf, conf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\n\nif __name__ == \"__main__\":\n session = SparkSession.builder.appName(\"Stocks\").master(\"local[*]\").getOrCreate()\n\n stocks = session.read \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", value=True) \\\n .csv(\"stock_prices.csv\")\n\n stocks.createOrReplaceTempView(\"stocksPriceView\")\n\n # stocksAverage = session.sql(\"SELECT 
date,AVG((close-open)*volume) AS ResultAverage FROM stocksPriceView GROUP BY date\")\n # stocksAverage.select(\"date\", \"ResultAverage\").coalesce(1).write.save(\"averageStockPrice.csv\", format=\"csv\",\n # header=\"true\")\n\n stocksMostFrequently = session.sql(\n \"SELECT ticker, avg(close*volume) as averageStockPrices from stocksPriceView group by ticker order by averageStockPrices desc limit 1\")\n stocksMostFrequently.show()", "sub_path": "stocks.py", "file_name": "stocks.py", "file_ext": "py", "file_size_in_byte": 984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 6, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "289240147", "text": "import tkinter as tk\r\nfrom PIL import ImageTk, Image\r\n\r\nroot = tk.Tk()\r\n\r\nimg = ImageTk.PhotoImage(Image.open(\"img1.PNG\"))\r\n\r\nlabel = tk.Label(root, image=img)\r\nlabel.grid(column=0, row=0)\r\n\r\nroot.mainloop()", "sub_path": "tkinter/tkinter03[image].py", "file_name": "tkinter03[image].py", "file_ext": "py", "file_size_in_byte": 207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "tkinter.Tk", "line_number": 4, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 6, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 6, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "275194947", "text": "#!/usr/bin/env python3\n'''\n * @time: Created on 2018/01/15 19:28\n * @author: by Ysan\n'''\n\nimport logging\nfrom conf import settings\n\n\ndef logger(log_type):\n logger = logging.getLogger(log_type)\n logger.setLevel(settings.LOG_LEVEL)\n\n log_file = \"%s/log/%s\" % (settings.BASE_DIR, settings.LOG_TYPES[log_type])\n fh = logging.FileHandler(log_file)\n fh.setLevel(settings.LOG_LEVEL)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n", "sub_path": "FTP_Server/ftp_server/core/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "conf.settings.LOG_LEVEL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "conf.settings.BASE_DIR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "conf.settings.LOG_TYPES", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "conf.settings.LOG_LEVEL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "652994781", "text": "import serial\n\nport = \"COM4\"\nser = 
serial.Serial(port, 9600, timeout=4.0)\nwhile True:\n ser.flushInput()\n response = ser.readline().strip()\n values = response.decode('utf-8').split(',')\n print('Soil Moisture = ', values ,'%')\n print(' ')\n", "sub_path": "Programas/Computer Port Reading/Geral/Serial Reading.py", "file_name": "Serial Reading.py", "file_ext": "py", "file_size_in_byte": 252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "serial.Serial", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "81948660", "text": "import sqlalchemy\nimport time\nimport csv\nfrom sqlalchemy.engine import url as sqlalchemy_url\nfrom sqlalchemy.orm import sessionmaker\nfrom sql_alchemy_orm_schema import *\nfrom constants import common_constants\n\n\nclass SqlAlchemySession(object):\n def __init__(self):\n # Defining environment variables for accessing private information\n self.DATABASE_USER = common_constants.REDSHIFT_USERNAME\n self.DATABASE_PASS = common_constants.REDSHIFT_PASSWORD\n self.DATABASE_NAME = common_constants.REDSHIFT_DATABASE_NAME\n self.DATABASE_PORT = common_constants.REDSHIFT_PORT\n self.DATABASE_ENDPOINT = common_constants.REDSHIFT_CLUSTER_ENDPOINT\n self.DATABASE_DIALECT = common_constants.DATABASE_DIALECT\n self.DATABASE_DRIVER = common_constants.DATABASE_DRIVER\n\n db_connect_url = sqlalchemy_url.URL(\n drivername=self.DATABASE_DIALECT + '+' + self.DATABASE_DRIVER,\n username=self.DATABASE_PASS,\n password=self.DATABASE_PASS,\n host=self.DATABASE_ENDPOINT,\n port=self.DATABASE_PORT,\n database=self.DATABASE_NAME)\n\n self.engine = sqlalchemy.create_engine(db_connect_url)\n # Base.metadata.create_all(bind=self.engine)\n self.Session = sessionmaker(bind=self.engine)\n\n\ndef rollback_and_close(session):\n session.rollback()\n session.close()\n\n\n# ===================================== Validation Queries ===================================== #\ndef validate_project_id(project_id, session):\n \"\"\"\n rtype: boolean\n return: True if queried project id exists\n \"\"\"\n validated_project_id = session.query(TeamProject).filter_by(id=project_id)\n if not validated_project_id:\n raise Exception\n\n\ndef validate_team_name(team_name, session):\n \"\"\"\n rtype: boolean\n return: True if queried team name exists\n \"\"\"\n result = session.query(Team).filter_by(name=team_name)\n return True if result else False\n\n\ndef validate_team_by_id(team_id, session):\n \"\"\"\n rtype: boolean\n return: True if queried team id exists\n \"\"\"\n result = session.query(Team).filter_by(id=team_id)\n return True if result else False\n\n\ndef validate_project_name(project_name, session):\n \"\"\"\n rtype: boolean\n return: True if queried project name exists\n \"\"\"\n result = session.query(TeamProject).filter(sqlalchemy.func.lower(TeamProject.name) == project_name.lower()).all()\n return True if result else False\n\n\ndef validate_project_name_change(project_id, project_name, session):\n \"\"\"\n rtype: boolean\n return: True if queried project name exists in other projects\n \"\"\"\n result = session.query(TeamProject.id).filter(TeamProject.name.lower() == project_name.lower())\n return result and project_id != result[0]\n\n\ndef validate_work_types(project_id, work_types_list, session):\n return session.query(TeamWorkTypes) \\\n .filter(TeamWorkTypes.team_project_id == project_id) \\\n .filter(TeamWorkTypes.work_type in work_types_list)\n\n\n# ===================================== Insert Queries ===================================== #\ndef 
insert_team(team_name, session):\n session.add(Team(name=team_name))\n session.commit()\n\n\ndef insert_team_project(team_project_name,\n team_id,\n jira_board_name,\n jira_board_id,\n jira_issue_filter,\n default_lead_time_start_state,\n default_lead_time_end_state,\n rolling_time_window_days,\n include_subtasks,\n excluded_issue_types_str,\n session):\n new_project = TeamProject(name=team_project_name,\n team_id=team_id,\n board_name=jira_board_name,\n board_id=jira_board_id,\n issue_filter=jira_issue_filter,\n default_lead_time_start_state=default_lead_time_start_state,\n default_lead_time_end_state=default_lead_time_end_state,\n rolling_time_window_days=rolling_time_window_days,\n include_subtasks=include_subtasks,\n excluded_issue_types=excluded_issue_types_str)\n session.add(new_project)\n session.commit()\n\n\ndef insert_jira_issue_types(session):\n fetch_issue_change_types = session.query(sqlalchemy.distinct(IssueChange.issue_type),\n IssueChange.subtask,\n IssueChange.team_project_id) \\\n .join(TeamJiraIssueTypes, ((TeamJiraIssueTypes.team_project_id == IssueChange.team_project_id) &\n (TeamJiraIssueTypes.issue_type == IssueChange.issue_type) &\n (TeamJiraIssueTypes.subtask == IssueChange.subtask))) \\\n .filter(TeamJiraIssueTypes.team_project_id is None)\n existing_issue_types = fetch_issue_change_types.all()\n\n session.add_all([TeamJiraIssueTypes(team_project_id=issue_type.project_id,\n issue_type=issue_type.issue_type,\n subtask=issue_type.subtask) for issue_type in existing_issue_types])\n session.commit()\n\n\ndef insert_new_issue_types(project_id, issue_type_list, session):\n issue_types_insert = [TeamWorkTypes(team_project_id=project_id,\n issue_type=issue_type,\n work_type=issue_type) for issue_type in issue_type_list]\n session.add_all(issue_types_insert)\n session.commit()\n\n\n# ===================================== Update Queries ===================================== #\ndef update_default_lead_time_states(project_id, start_state, end_state, session):\n project_to_update = session.query(TeamProject).filter_by(id=project_id).one()\n project_to_update.default_lead_time_start_state = start_state\n project_to_update.default_lead_time_end_state = end_state\n session.commit()\n\n\ndef update_issues(project_id,\n board_name,\n include_subtasks,\n issue_filter,\n all_issue_types,\n excluded_types_list,\n project_name,\n session):\n exists_types = session.query(TeamWorkTypes.issue_type) \\\n .filter_by(project_id=project_id).all()\n exists_types_list = [result[0] for result in exists_types]\n\n excluded_issue_types = ','.join(excluded_types_list)\n\n current_issue_filter = session.query(TeamProject.issue_filter) \\\n .filter_by(id=project_id).one()\n current_filter_result = current_issue_filter[0]\n\n if current_filter_result != issue_filter:\n reset_watermark = session.query(TeamProject).filter_by(id=project_id).one()\n reset_watermark.last_issue_change = 0\n\n issue_to_delete = session.query(IssueChange).filter_by(team_project_id=project_id).one()\n session.delete(issue_to_delete)\n\n work_type_to_delete = session.query(TeamWorkTypes).filter_by(team_project_id=project_id).one()\n session.delete(work_type_to_delete)\n\n issue_type_to_delete = session.query(TeamJiraIssueTypes).filter_by(team_project_id=project_id).one()\n session.delete(issue_type_to_delete)\n\n remove_types_list = excluded_issue_types\n if not include_subtasks:\n if all([issue_type['subtask'] for issue_type in all_issue_types]):\n raise ValueError(\"All issue types are sub tasks. 
Sub tasks need to be included all the time.\")\n remove_types_list.extend([issue_type['name'] for issue_type in all_issue_types if issue_type['subtask']])\n all_issue_types = [issue_type['name'] for issue_type in all_issue_types\n if (include_subtasks or not issue_type['subtask'])]\n insert_issue_type_list = [issue_type for issue_type in all_issue_types\n if issue_type not in exists_types_list and issue_type not in excluded_types_list]\n if all_issue_types and set(exists_types_list).issubset(set(remove_types_list)) and not insert_issue_type_list:\n raise ValueError(\"You have excluded all issue types allowed for this project. \"\n \"Please include at least one issue type.\")\n\n update_project = session.query(TeamProject).filter_by(id=project_id).one()\n\n update_project.name = project_name,\n update_project.board_name = board_name,\n update_project.include_subtasks = include_subtasks,\n update_project.excluded_issue_types = excluded_issue_types,\n update_project.issue_filter = issue_filter\n\n for issue_type in insert_issue_type_list:\n session.add(TeamWorkTypes(team_project_id=project_id,\n issue_type=issue_type,\n work_type=issue_type))\n\n if remove_types_list:\n # issue_type_to_delete = session.query(TeamWorkTypes).filter((TeamWorkTypes == project_id) &\n # (TeamWorkTypes.issue_type in tuple(\n # remove_types_list)))\n issue_types_to_delete = TeamWorkTypes.__table__.delete()\\\n .where((TeamWorkTypes.team_project_id == project_id) &\n (TeamWorkTypes.issue_type in tuple(remove_types_list)))\n session.execute(issue_types_to_delete)\n\n session.commit()\n\n\ndef update_team_status_states(project_id, status_states_list, session):\n # This seems to be the best way of executing a delete all query\n delete_status_states_query = TeamStatusStates.__table__.delete()\\\n .where(TeamStatusStates.team_project_id == project_id)\n session.execute(delete_status_states_query)\n\n # TODO refactor passed status_states_list to list of TeamStatusStates to simplify this\n status_states = [TeamStatusStates(team_project_id=status_state[0],\n status=status_state[1],\n state_name=status_state[2]) for status_state in status_states_list]\n session.add_all(status_states)\n session.commit()\n\n\ndef update_team_work_states(project_id, work_states_list, session):\n # This seems to be the best way of executing a delete all query\n delete_team_work_states_query = TeamWorkStates.__table__.delete()\\\n .where(TeamWorkStates.team_project_id == project_id)\n session.execute(delete_team_work_states_query)\n\n # TODO refactor passed work_states_list to list of TeamWorkStates to simplify this\n work_states = [TeamWorkStates(team_project_id=work_state[0],\n state_name=work_state[1],\n seq_number=work_state[2]) for work_state in work_states_list]\n\n session.add_all(work_states)\n session.commit()\n\n\ndef update_team_work_types(project_id, work_types_list, session):\n # This seems to be the best way of executing a delete all query\n delete_team_work_types_query = TeamWorkTypes.__table__.delete()\\\n .where(TeamWorkTypes.team_project_id == project_id)\n session.execute(delete_team_work_types_query)\n\n # TODO refactor passed work_types_list to list of TeamWorkTypes to simplify this\n work_types = [TeamWorkTypes(team_project_id=work_type[0],\n issue_type=work_type[1],\n work_type=work_type[2]) for work_type in work_types_list]\n\n session.add_all(work_types)\n session.commit()\n\n\ndef update_team_repos(project_id, repos_list, session):\n # This seems to be the best way of executing a delete all query\n delete_team_repo_query 
= TeamRepo.__table__.delete()\\\n .where(TeamRepo.team_project_id == project_id)\n session.execute(delete_team_repo_query)\n\n # TODO refactor passed repo_list to list of TeamRepo to simplify this\n team_repos = [TeamRepo(team_project_id=team_repo[0],\n repo_name=team_repo[1]) for team_repo in repos_list]\n\n session.add_all(team_repos)\n session.commit()\n\n\ndef update_last_etl(team_config, session):\n project_id = team_config.get('id')\n update_start_time = int(time.time())\n\n try:\n last_etl = session.query(TeamProjectEtl).filter_by(team_project_id=project_id).one()\n\n if (not last_etl.last_etl_run) or ((update_start_time - last_etl.last_etl_run) > 300):\n last_etl.last_etl_run = update_start_time\n else:\n print(\"ERROR: ETL for project {} is already running at time {}\".format(\n team_config.get(\"name\"), last_etl.last_etl_run))\n return\n except sqlalchemy.orm.exc.NoResultFound:\n session.add(TeamProjectEtl(team_project_id=project_id,\n last_etl_run=update_start_time))\n session.commit()\n return {\n \"update_start_time\": update_start_time\n }\n\n\ndef reset_last_etl(team_id, session):\n last_etl = session.query(TeamProjectEtl).filter_by(team_project_id=team_id).one()\n last_etl.last_etl_run = None\n session.commit()\n\n\ndef update_last_issue_change_from_csv(csv_path, last_issue_change, team_id, session):\n # parse csv into map, bulk insert\n local_csv_file = open(csv_path, 'r')\n reader = csv.DictReader(local_csv_file)\n\n rows_to_insert = [IssueChange(team_project_id=row['team_project_id'],\n changed=row['changed'],\n issue_key=row['issue_key'],\n field_name=row['field_name'],\n prev_value=row['prev_value'],\n new_value=row['new_value'],\n issue_type=row['issue_type'],\n resolution=row['resolution'],\n subtask=row['subtask']) for row in reader]\n session.add_all(rows_to_insert)\n local_csv_file.close()\n\n project_to_update = session.query(TeamProject).filter_by(id=team_id)\n project_to_update.last_issue_change = last_issue_change\n session.commit()\n\n\n# ===================================== Fetch Queries ===================================== #\ndef fetch_teams(session):\n return session.query(Team).all()\n\n\ndef fetch_team_by_name(team_name, session):\n return session.query(Team).filter_by(name=team_name).all()\n\n\ndef fetch_team_by_id(team_id, session):\n return session.query(Team).filter_by(id=team_id).all()\n\n\ndef fetch_team_id(team_name, session):\n \"\"\"\n rtype: int\n return: team id\n \"\"\"\n result = session.query(Team.id).filter_by(name=team_name).one()\n return result[0]\n\n\ndef fetch_teams_for_scheduler(session):\n return session.query(TeamProject.id, TeamProject.name) \\\n .order_by((TeamProject.last_issue_change is None), TeamProject.last_issue_change) \\\n .all()\n\n\ndef fetch_team_from_project(team_id, project_name=None, session=None):\n if session is None:\n raise ValueError(\"sql alchemy session needs to be passed\")\n\n fetch_team_from_project_query = session.query(TeamProject.name, TeamProject.id) \\\n .filter_by(team_id=team_id)\n\n fetch_team_from_project_query_by_name = session.query(TeamProject.name, TeamProject.id) \\\n .filter_by(team_id=team_id, name=project_name)\n\n if project_name is not None:\n return fetch_team_from_project_query_by_name.all()\n else:\n return fetch_team_from_project_query.all()\n\n\ndef fetch_project_id(project_name, session):\n \"\"\"\n rtype: int\n return: project id\n \"\"\"\n return session.query(TeamProject.id).filter_by(name=project_name).one()\n\n\ndef fetch_board_id(project_id, session):\n \"\"\"\n rtype: 
int\n return: board id\n \"\"\"\n board_id = session.query(TeamProject.board_id).filter_by(id=project_id).one()\n return board_id[0]\n\n\ndef fetch_issue_configuration(project_id, session):\n \"\"\"\n rtype: list [(board_name(str), board_id(int), rolling_time_window_days(int), issue_filter(str),\n last_issue_change(int), include_subtasks(bool), excluded_issue_types(str))]\n return: requested column values from team_project with matching project id\n \"\"\"\n return session.query(TeamProject.board_name,\n TeamProject.board_id,\n TeamProject.rolling_time_window_days,\n TeamProject.issue_filter,\n TeamProject.last_issue_change,\n TeamProject.include_subtasks,\n TeamProject.excluded_issue_types).filter_by(id=project_id).all()\n\n\ndef fetch_lead_time_start_state(project_id, session):\n \"\"\"\n rtype: str\n return: default lead time start state\n \"\"\"\n result = session.query(TeamProject.default_lead_time_start_state).filter_by(id=project_id).one()\n return result[0]\n\n\ndef fetch_lead_time_end_state(project_id, session):\n \"\"\"\n rtype: str\n return: default lead time end state\n \"\"\"\n result = session.query(TeamProject.default_lead_time_end_state).filter_by(id=project_id).one()\n return result[0]\n\n\ndef fetch_team_config_from_project(team_id, session):\n result = session.query(TeamProject.id,\n TeamProject.name,\n TeamProject.issue_filter,\n TeamProject.last_issue_change) \\\n .filter_by(id=team_id).one()\n return {\n 'id': result[0],\n 'name': result[1],\n 'issue_filter': result[2],\n 'last_issue_change': result[3]\n }\n\n\ndef fetch_issue_type_exclusions(project_id, session):\n return session.query(TeamProject.excluded_issue_types,\n TeamProject.include_subtasks) \\\n .filter_by(id=project_id).one()\n\n\ndef fetch_excluded_issue_types_from_team_project(session):\n return session.query(TeamProject.id,\n TeamProject.excluded_issue_types,\n TeamProject.include_subtasks).all()\n\n\ndef fetch_rolling_window(project_id, session):\n \"\"\"\n rtype: int\n return: number of rolling window days\n \"\"\"\n result = session.query(TeamProject.rolling_time_window_days).filter_by(id=project_id).one()\n return result[0]\n\n\ndef fetch_repos(project_id, session):\n \"\"\"\n rtype: list[str]\n return: repository names that belongs to the project\n \"\"\"\n results = session.query(TeamRepo.repo_name).filter_by(team_project_id=project_id).all()\n return [result[0] for result in results] if results else []\n\n\ndef fetch_last_etl_run(project_id, session):\n return session.query(TeamProjectEtl.last_etl_run).filter_by(team_project_id=project_id).one()\n\n\ndef fetch_merged_pr_count(project_id, repo_names, date_since, date_until, session):\n \"\"\"\n rtype: list [(pr_count(int), week(datetime))]\n return: total pull request count per corresponding week\n \"\"\"\n return session.query(sqlalchemy.func.count(PullRequests.pr_number),\n sqlalchemy.func.date_trunc('week', PullRequests.merged_at)) \\\n .join(TeamRepo, PullRequests.repo == TeamRepo.repo_name) \\\n .filter((TeamRepo.team_project_id == project_id) &\n (sqlalchemy.between(PullRequests.merged_at, date_since, date_until)) &\n (PullRequests.repo in tuple(repo_names))) \\\n .group_by(sqlalchemy.func.date_trunc('week', PullRequests.merged_at)) \\\n .order_by(sqlalchemy.func.date_trunc('week', PullRequests.merged_at)).all()\n\n\ndef fetch_merged_pull_requests_timestamp(project_id, repo_names, date_since, date_until, session):\n return session.query(PullRequests.created_at, PullRequests.merged_at) \\\n .join(TeamRepo, TeamRepo.repo_name == 
PullRequests.repo) \\\n .filter(TeamRepo.team_project_id == project_id,\n sqlalchemy.between(PullRequests.created_at, date_since, date_until),\n sqlalchemy.between(PullRequests.merged_at, date_since, date_until),\n PullRequests.repo in repo_names) \\\n .order_by(PullRequests.merged_at).all()\n\n\ndef fetch_failed_pull_requests_volume(project_id, repo_names, date_since, date_until, session):\n return session.query(PullRequests.pr_number,\n PullRequests.created_at,\n PullRequests.closed_at,\n (PullRequests.lines_added + PullRequests.lines_deleted).label('volume')) \\\n .join(TeamRepo, TeamRepo.repo_name == PullRequests.repo) \\\n .filter((TeamRepo.team_project_id == project_id) &\n (PullRequests.closed_at is not None) &\n (PullRequests.created_at < date_until) &\n (PullRequests.closed_at > date_since) &\n (PullRequests.repo in tuple(repo_names))) \\\n .order_by(PullRequests.created_at).all()\n\n\ndef fetch_issue_types(project_id, session):\n return session.query(TeamJiraIssueTypes.issue_type, TeamJiraIssueTypes.subtask) \\\n .filter_by(team_project_id=project_id).all()\n\n\ndef fetch_project_work_types(project_id, session):\n return session.query(TeamWorkTypes.work_type,\n TeamWorkTypes.issue_type,\n TeamJiraIssueTypes.subtask) \\\n .join(TeamJiraIssueTypes, ((TeamWorkTypes.team_project_id == project_id) &\n (TeamWorkTypes.issue_type == TeamJiraIssueTypes.issue_type))) \\\n .filter(TeamWorkTypes.team_project_id == project_id).all()\n\n\n# TODO should be merged with function above\ndef fetch_team_work_types(project_id, session):\n return session.query(TeamWorkTypes.issue_type) \\\n .filter_by(team_project_id=project_id).all()\n\n\ndef fetch_work_states(project_id, session):\n \"\"\"\n rtype: list[(state_name(str)]\n return: list of state names and its sequence\n \"\"\"\n work_states_tuple = session.query(TeamWorkStates.state_name) \\\n .filter_by(team_project_id=project_id) \\\n .order_by(TeamWorkStates.seq_number).all()\n return [work_state[0] for work_state in work_states_tuple]\n\n\ndef fetch_sequence_of_start_states_and_end_states_for_project(project_id, session):\n return session.query(TeamWorkStates.seq_number) \\\n .join(TeamProject, TeamProject.id == TeamWorkStates.team_project_id) \\\n .filter((TeamWorkStates.team_project_id == project_id) &\n ((TeamWorkStates.state_name == TeamProject.default_lead_time_end_state) |\n (TeamWorkStates.state_name == TeamProject.default_lead_time_start_state))) \\\n .order_by(TeamWorkStates.seq_number).all()\n\n\ndef fetch_state_and_sequence_from_project(project_id, session):\n return session.query(TeamWorkStates.state_name,\n TeamWorkStates.seq_number) \\\n .filter_by(team_project_id=project_id) \\\n .order_by(TeamWorkStates.seq_number).all()\n\n\ndef fetch_status_list_of_state(project_id, state_name, session):\n \"\"\"\n rtype: list[str]\n return: all possible status states of the project\n \"\"\"\n state_results = session.query(TeamStatusStates.status).filter_by(team_project_id=project_id,\n state_name=state_name).all()\n return [result[0] for result in state_results]\n\n\n# TODO this function needs to be scrutinized.\ndef fetch_issues_in_valid_states(project_id, valid_issue_types, invalid_resolutions, session):\n valid_issue_types_filter_flag = 1 if valid_issue_types else 0\n valid_issue_types_filter = valid_issue_types if valid_issue_types else []\n invalid_resolutions_filter_flag = 1 if invalid_resolutions else 0\n invalid_resolutions_filter = invalid_resolutions if invalid_resolutions else []\n\n join_query = 
session.query(TeamStatusStates.team_project_id,\n TeamStatusStates.status,\n TeamStatusStates.state_name,\n TeamWorkStates.seq_number) \\\n .outerjoin(TeamWorkStates, ((TeamStatusStates.team_project_id == TeamWorkStates.team_project_id) &\n (TeamStatusStates.state_name == TeamWorkStates.state_name))).subquery()\n\n prev_number_seq_query = session.query(join_query) \\\n .outerjoin(IssueChange, ((join_query.c.team_project_id == IssueChange.team_project_id) &\n (join_query.c.status == IssueChange.prev_value))).subquery()\n\n new_number_seq_query = session.query(join_query) \\\n .outerjoin(IssueChange, ((join_query.c.team_project_id == IssueChange.team_project_id) &\n (join_query.c.status == IssueChange.new_value))).subquery()\n\n # `==` operator must be used for sqlalchemy, as it doesn't overload the `is` operator\n prev_number_seq_case_query = sqlalchemy.sql.expression \\\n .case([(prev_number_seq_query.c.seq_number == None, -1)],\n else_=prev_number_seq_query.c.seq_number)\n\n # `==` operator must be used for sqlalchemy, as it doesn't overload the `is` operator\n new_number_seq_case_query = sqlalchemy.sql.expression \\\n .case([(new_number_seq_query.c.seq_number == None, -1)],\n else_=new_number_seq_query.c.seq_number)\n\n prev_number_seq = sqlalchemy.func.array_agg(prev_number_seq_case_query).label('prev_number_seq')\n new_number_seq = sqlalchemy.func.array_agg(new_number_seq_case_query).label('new_number_seq')\n issue_changed_agg = sqlalchemy.func.array_agg(IssueChange.changed).label('changed_seq')\n\n return session.query(IssueChange.issue_key,\n prev_number_seq,\n new_number_seq,\n issue_changed_agg) \\\n .outerjoin(prev_number_seq_query, ((prev_number_seq_query.c.team_project_id == IssueChange.team_project_id) &\n (prev_number_seq_query.c.status == IssueChange.prev_value))) \\\n .outerjoin(new_number_seq_query, ((new_number_seq_query.c.team_project_id == IssueChange.team_project_id) &\n (new_number_seq_query.c.status == IssueChange.new_value))) \\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.field_name == 'Status') &\n ((valid_issue_types_filter_flag == 0) | (IssueChange.issue_type in valid_issue_types_filter)) &\n ((invalid_resolutions_filter_flag == 0) | (IssueChange.resolution not in invalid_resolutions_filter))) \\\n .group_by(IssueChange.issue_key).all()\n\n\ndef fetch_completion_event_statuses(project_id, session):\n start_state_query = session.query(TeamWorkStates.team_project_id,\n TeamWorkStates.seq_number) \\\n .join(TeamProject, ((TeamWorkStates.team_project_id == TeamProject.id) &\n (TeamWorkStates.state_name == TeamProject.default_lead_time_start_state))) \\\n .filter(TeamWorkStates.team_project_id == project_id) \\\n .subquery()\n\n end_state_query = session.query(TeamWorkStates.team_project_id,\n TeamWorkStates.seq_number) \\\n .join(TeamProject, ((TeamWorkStates.team_project_id == TeamProject.id) &\n (TeamWorkStates.state_name == TeamProject.default_lead_time_end_state))) \\\n .filter(TeamWorkStates.team_project_id == project_id) \\\n .subquery()\n\n state_composition_query = session.query(TeamProject.id,\n start_state_query.c.seq_number.label('start_state'),\n end_state_query.c.seq_number.label('end_state')) \\\n .subquery()\n\n is_complete_query = session.query(TeamStatusStates.status,\n (TeamWorkStates.seq_number >= state_composition_query.c.end_state).label('is_complete_status')) \\\n .filter((TeamProject.id == project_id) &\n (state_composition_query.c.id == TeamProject.id) &\n (TeamWorkStates.team_project_id == TeamProject.id) &\n 
(TeamWorkStates.seq_number >= state_composition_query.c.start_state) &\n (TeamStatusStates.team_project_id == TeamProject.id) &\n (TeamStatusStates.state_name == TeamWorkStates.state_name))\n\n return is_complete_query.all()\n\n\ndef fetch_backlog_changes(project_id,\n end_date,\n completed_status_list,\n issue_type_list,\n issue_type_flag,\n session):\n completed = session.query(sqlalchemy.func.max(IssueChange.changed).label('changed'),\n sqlalchemy.sql.expression.literal('Completed').label('new_value'),\n IssueChange.issue_key) \\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.new_value in completed_status_list) &\n (IssueChange.changed < end_date) &\n (IssueChange.field_name == 'Status') &\n ((issue_type_flag == 0) | (IssueChange.issue_type in issue_type_list))) \\\n .group_by(IssueChange.issue_key) \\\n .subquery()\n\n join_with_uncompleted = session.query(completed.c.changed,\n sqlalchemy.sql.expression.literal('Completed').label('new_value'),\n completed.c.issue_key) \\\n .join(IssueChange, ((IssueChange.issue_key == completed.c.issue_key) &\n (IssueChange.changed > completed.c.changed) &\n (IssueChange.new_value not in completed_status_list))) \\\n .filter((IssueChange.changed == None)) \\\n .order_by(completed.c.changed)\n\n issue_change_query = session.query(IssueChange.changed,\n IssueChange.new_value,\n IssueChange.issue_key) \\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.prev_value == '') &\n (IssueChange.changed < end_date) &\n (IssueChange.field_name == 'Status') &\n ((issue_type_flag == 0) | (IssueChange.issue_type in issue_type_list)))\n\n return issue_change_query.union(join_with_uncompleted).all()\n\n\ndef fetch_throughput_history(project_id,\n last_week,\n first_week,\n completed_list,\n working_list,\n issue_type_flag,\n issue_type_list,\n invalid_resolutions_flag,\n invalid_resolutions_list,\n session):\n return session.query(sqlalchemy.func.max(IssueChange.changed).label('maxdate'),\n IssueChange.issue_key) \\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.changed < last_week) &\n (IssueChange.changed >= first_week) &\n (IssueChange.new_value in completed_list) &\n (IssueChange.prev_value in working_list) &\n ((issue_type_flag == 0) | (IssueChange.issue_type in issue_type_list)) &\n ((invalid_resolutions_flag == 0) | (IssueChange.resolution not in invalid_resolutions_list))) \\\n .group_by(IssueChange.issue_key) \\\n .order_by(sqlalchemy.asc('maxdate')).all()\n\n\n# TODO this function may be able to be rolled into the one above\ndef fetch_throughput_predictability(project_id,\n last_week,\n first_week,\n completed_list,\n working_list,\n issue_type_flag,\n issue_type_list,\n invalid_resolutions_flag,\n invalid_resolutions_list,\n session):\n return session.query(sqlalchemy.func.max(IssueChange.changed).label('maxdate')) \\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.changed < last_week) &\n (IssueChange.changed >= first_week) &\n (IssueChange.new_value in completed_list) &\n (IssueChange.prev_value in working_list) &\n ((issue_type_flag == 0) | (IssueChange.issue_type in issue_type_list)) &\n ((invalid_resolutions_flag == 0) | (IssueChange.resolution not in invalid_resolutions_list))) \\\n .group_by(IssueChange.issue_key) \\\n .order_by(sqlalchemy.asc('maxdate')).all()\n\n\n# TODO this function needs to be scrutinized.\ndef fetch_quarterly_throughput(project_id,\n quarters,\n completed_list,\n working_list,\n issue_type_flag,\n issue_type_list,\n 
invalid_resolutions_flag,\n invalid_resolutions_list,\n session):\n num_of_quarters = len(quarters)\n\n loop_counter = 0\n index_of_quarter_dict = num_of_quarters - 1\n # build all but last part of select string\n\n quarters_tuple = []\n while loop_counter < num_of_quarters - 1:\n quarters_tuple += (quarters[index_of_quarter_dict - 1], quarters[index_of_quarter_dict])\n loop_counter += 1\n index_of_quarter_dict -= 1\n\n composed_query = [sqlalchemy.func.sum(\n sqlalchemy.sql.expression.case(\n ((IssueChange.changed < quarter[0]) &\n (IssueChange.changed >= quarter[1])))).label('q' + str(index)) for index, quarter\n in enumerate(quarters_tuple)]\n\n return session.query(composed_query)\\\n .filter((IssueChange.team_project_id == project_id) &\n (IssueChange.new_value in completed_list) &\n (IssueChange.prev_value in working_list) &\n ((issue_type_flag == 0) | (IssueChange.issue_type in issue_type_list)) &\n ((invalid_resolutions_flag == 0) | (IssueChange.resolution not in invalid_resolutions_list))).all()\n\n\ndef fetch_commits_from_tags(repo, date_since, date_until, session):\n return session.query(Tags.commit_time, Tags.name)\\\n .filter((Tags.repo == repo) &\n (sqlalchemy.between(Tags.commit_time, date_since, date_until)))\\\n .order_by(Tags.commit_time).all()\n", "sub_path": "source/commons/vger_commons/python/database_connection/sql_alchemy_session.py", "file_name": "sql_alchemy_session.py", "file_ext": "py", "file_size_in_byte": 34590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "constants.common_constants.REDSHIFT_USERNAME", "line_number": 13, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 13, "usage_type": "name"}, {"api_name": "constants.common_constants.REDSHIFT_PASSWORD", "line_number": 14, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 14, "usage_type": "name"}, {"api_name": "constants.common_constants.REDSHIFT_DATABASE_NAME", "line_number": 15, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 15, "usage_type": "name"}, {"api_name": "constants.common_constants.REDSHIFT_PORT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 16, "usage_type": "name"}, {"api_name": "constants.common_constants.REDSHIFT_CLUSTER_ENDPOINT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 17, "usage_type": "name"}, {"api_name": "constants.common_constants.DATABASE_DIALECT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 18, "usage_type": "name"}, {"api_name": "constants.common_constants.DATABASE_DRIVER", "line_number": 19, "usage_type": "attribute"}, {"api_name": "constants.common_constants", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.url.URL", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.url", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.func.lower", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sqlalchemy.distinct", "line_number": 124, "usage_type": "call"}, {"api_name": "time.time", "line_number": 284, 
"usage_type": "call"}, {"api_name": "sqlalchemy.orm", "line_number": 295, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 313, "usage_type": "call"}, {"api_name": "sqlalchemy.func.count", "line_number": 479, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 479, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.date_trunc", "line_number": 480, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 480, "usage_type": "attribute"}, {"api_name": "sqlalchemy.between", "line_number": 483, "usage_type": "call"}, {"api_name": "sqlalchemy.func.date_trunc", "line_number": 485, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 485, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.date_trunc", "line_number": 486, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 486, "usage_type": "attribute"}, {"api_name": "sqlalchemy.between", "line_number": 493, "usage_type": "call"}, {"api_name": "sqlalchemy.between", "line_number": 494, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.case", "line_number": 593, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 593, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.case", "line_number": 598, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 598, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.array_agg", "line_number": 602, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 602, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.array_agg", "line_number": 603, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 603, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.array_agg", "line_number": 604, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 604, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.max", "line_number": 659, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 659, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.literal", "line_number": 660, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 660, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.literal", "line_number": 671, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 671, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.max", "line_number": 701, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 701, "usage_type": "attribute"}, {"api_name": "sqlalchemy.asc", "line_number": 711, "usage_type": "call"}, {"api_name": "sqlalchemy.func.max", "line_number": 725, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 725, "usage_type": "attribute"}, {"api_name": "sqlalchemy.asc", "line_number": 734, "usage_type": "call"}, {"api_name": "sqlalchemy.func.sum", "line_number": 759, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 759, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.case", "line_number": 760, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 760, "usage_type": "attribute"}, {"api_name": "sqlalchemy.between", "line_number": 776, "usage_type": "call"}]} +{"seq_id": "636859634", "text": "from flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash\n\n\nDATABASE = '/tmp/huRand.db'\nDEBUG = True\nSECRET_KEY = 'development key'\n\n\napp = 
Flask(__name__)\napp.config.from_object(__name__)\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "huRandom/huRandom.py", "file_name": "huRandom.py", "file_ext": "py", "file_size_in_byte": 278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "391751577", "text": "from django.http import JsonResponse\n\ndef validate_with_form(form_class):\n def decorator(view):\n def wrapper(request, *args, **kwargs):\n if request.method == 'GET':\n data = request.GET\n elif request.method == 'POST':\n data = request.POST\n else:\n raise NotImplementedError()\n form = form_class(data)\n if form.is_valid():\n return view(request, form.cleaned_data, *args, **kwargs)\n else:\n return JsonResponse({\n 'success': False,\n 'message': 'There were validation errors.',\n 'errors': form.errors\n }, status=400)\n return wrapper\n return decorator\n", "sub_path": "localizefriends/api/view_decorators.py", "file_name": "view_decorators.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.http.JsonResponse", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "107266029", "text": "import asyncio\nimport uvloop\nasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\nimport torch\nimport collections.abc\nimport os\n\nfrom .server import Server\n# from .server import Server\nfrom .worker import Worker\nfrom .util import AioSharedReadExclusiveWriteMutex, deepcopy_model, coro_print_exception, BASIC_PROGRESS_MASK, timeit_recorder\nfrom .psmodel import BaseParameterServerModule\nfrom .progress import loss_progress, ObjCountHandle\nfrom .optim import ParameterServerOptimizer\nfrom .ps_config import WorkerMode, Config\nfrom .import_util import libtorch_embedding\n\nfrom typing import List, Callable, Optional\n\n\n@timeit_recorder.coro_timeit\n@coro_print_exception\nasync def runner_coroutine(\n model: BaseParameterServerModule,\n optimizer: ParameterServerOptimizer,\n loss,\n workers: List[Worker],\n server: Server,\n num_parsers: int,\n data_loader: collections.abc.AsyncIterable,\n uri_callbacks: Optional[List[collections.abc.Awaitable]],\n stop_progress: asyncio.Event,\n loss_queue: List[float],\n update_callback_mode: bool = False\n):\n try:\n model.prepare_hook()\n worker_process_coros = dict()\n free_workers = set(workers)\n num_workers = len(workers)\n\n libtorch_embedding.set_current_thread_priority(-1000)\n\n async for content in data_loader:\n uri, downloaded_content = content\n\n # new parser\n parser = model.create_parser(downloaded_content, num_parsers)\n parser.start()\n for r in parser:\n if len(worker_process_coros) < num_workers:\n cur_worker = free_workers.pop()\n if Config.worker_mode == WorkerMode.TRAIN:\n if update_callback_mode:\n future = asyncio.ensure_future(cur_worker.process_train_with_update(r))\n else:\n future = asyncio.ensure_future(cur_worker.process_train_minibatch(r))\n else:\n future = asyncio.ensure_future(cur_worker.process_evaluation_minibatch(r))\n worker_process_coros[future] = cur_worker\n continue\n done, _ = await asyncio.wait(worker_process_coros.keys(), return_when=asyncio.FIRST_COMPLETED)\n for f in done:\n if Config.worker_mode == WorkerMode.TRAIN:\n if update_callback_mode:\n cur_loss = f.result()\n else:\n grad, meta_info, cur_loss = f.result()\n await server.schedule(grad, meta_info)\n else:\n key, 
predictions, cur_loss = f.result()\n await Config.predictions_queue.put(tuple([key, predictions]))\n loss_queue.append(cur_loss)\n # return finished worker back to the pool\n free_workers.add(worker_process_coros.pop(f))\n\n # First, we wait for all workers and ensure that all gradients are in queue\n if len(worker_process_coros):\n await asyncio.wait(worker_process_coros.keys())\n # Next, process all gradients. After that, no more gradients will be added\n await server.join()\n\n worker_process_coros = dict()\n free_workers = set(workers)\n\n # URI has been finished - now send callback caller to workers\n if uri_callbacks is not None:\n for callback in uri_callbacks:\n await callback(model, optimizer, loss, uri)\n finally:\n server.shutdown()\n [worker.shutdown() for worker in workers]\n stop_progress.set()\n\n\n@timeit_recorder.coro_timeit\n@coro_print_exception\nasync def single_thread_runner_coroutine(\n model: BaseParameterServerModule,\n optimizer: ParameterServerOptimizer,\n loss,\n num_parsers: int,\n data_loader: collections.abc.AsyncIterable,\n uri_callbacks: Optional[List[collections.abc.Awaitable]],\n stop_progress: asyncio.Event,\n loss_queue: List[float],\n obj_count: ObjCountHandle\n):\n try:\n model.prepare_hook()\n async for content in data_loader:\n uri, downloaded_content = content\n\n # new parser\n parser = model.create_parser(downloaded_content, num_parsers)\n parser.start()\n for r in parser:\n if Config.worker_mode == WorkerMode.TRAIN:\n *X, y = model.parse_train_minibatch(r)\n res = model(*X).squeeze()\n model.zero_grad()\n cur_loss = loss(res, y)\n cur_loss.backward()\n optimizer.step()\n await asyncio.sleep(0)\n elif Config.worker_mode == WorkerMode.EVALUATE:\n with torch.no_grad():\n *rest_batch, y = model.parse_evaluation_minibatch(r)\n key, *X = rest_batch\n predictions = model(*X).squeeze()\n cur_loss = loss(predictions, y)\n await Config.predictions_queue.put(tuple([key, predictions]))\n else:\n raise NotImplementedError()\n cur_loss = float(cur_loss.data) / len(y)\n loss_queue.append(cur_loss)\n obj_count += len(y)\n\n # URI has been finished - now send callback caller to workers\n if uri_callbacks is not None:\n for callback in uri_callbacks:\n await callback(model, optimizer, loss, uri)\n finally:\n stop_progress.set()\n\n\ndef run(model: BaseParameterServerModule,\n loss: torch.nn.modules.loss._Loss,\n optimizer: ParameterServerOptimizer,\n data_loader: collections.abc.AsyncIterable,\n num_workers: int,\n num_servers: int,\n num_local_servers: int,\n num_parsers: int,\n sync_policy: Callable[[torch.Tensor], bool] = None,\n frequency: float = 1,\n max_gradient_queue_size: int = 0,\n uri_callbacks: Optional[List[collections.abc.Awaitable]] = None,\n worker_mode: WorkerMode = WorkerMode.TRAIN,\n update_callback_mode: bool = False,\n single_thread: bool = False):\n \"\"\"\n Run asynchronous one-machine training of deep learning model.\n BaseParameterServerModule contains of two parts: async_forward and sync_forward. AsyncForward must\n be called only on HashEmbedding layers because async_forward is called in THREAD.\n After async_forward has been executed, the models is sent to subprocess. 
There, we calculate the deep part of model\n (everything after first embedding layers).\n \n :param model: instance of torch_ps.util.BaseParameterServerModule\n :param loss: instance of torch.nn.modules.loss._Loss\n :param optimizer: instance of torch_ps.optim.ParameterServerOptimizer\n :param yt_uris: list of YT paths for input tables\n :param num_workers: int, number of workers. Each worker is responsible for processing minibatch, e.g. calculating\n forward-backward passes, and sending gradients to servers\n :param num_servers: number of processes to update deep part of the model. Deep part of model must contain only tensors\n :param num_local_servers: number of threads to update embedding part of the model. Embedding part should only contain\n HashEmbedding layers\n :param num_parsers: number of threads to parse records.\n \n :param num_downloaders: number of threads to download yt_uris. Each downloader download one uri. Uris are downloaded\n and passed to the model sequentially according to order in yt_uris list\n :param token: YT token string\n :param sync_policy: SyncPolicy is called each time a new gradient has been processed by server. Returns True or False. When True,\n model is blocked for synchronization. Usually synchronization requires a bit of copying for small tensors and is very fast\n :param parser_max_records: Queue size for parser. \n :param frequency: Frequency of progress printing\n :param max_gradient_queue_size: maxsize of gradient queue\n :param uri_callbacks: list of callbacks that will be executed each time URI processing has finished\n :param ps_config: instance keeping all global variables for running program\n :param single_thread: run in single thread, process/update, one-by-one. Only for debugging\n \"\"\"\n Config.worker_mode = worker_mode\n\n # Setup event loop\n loop = asyncio.get_event_loop_policy().new_event_loop()\n if 'PYTHONASYNCIODEBUG' in os.environ:\n loop = asyncio.get_event_loop()\n loop.set_debug(True)\n loop.slow_callback_duration = 0.001\n asyncio.set_event_loop(loop)\n\n # Create all synchronization primitives\n if sync_policy is not None:\n synced_model = deepcopy_model(model)\n else:\n synced_model = model\n update_locker = AioSharedReadExclusiveWriteMutex()\n loss_queue = list()\n obj_count = ObjCountHandle()\n stop_progress = asyncio.Event()\n\n # Progress coroutine\n # create header\n mask = \" \".join([BASIC_PROGRESS_MASK, model.model_progress_mask()])\n header = mask.format(\"Sec\", \"Total objs\", \"objs\", \"Grad. 
queue\", \"Loss\", *model.model_progress_header())\n\n # The main runner coroutine\n if not single_thread:\n server = Server(model, synced_model, optimizer, max_gradient_queue_size, num_local_servers, num_servers,\n sync_policy, update_locker)\n workers = [\n Worker(synced_model, loss, rank, update_locker, Config.predictions_queue, obj_count, server)\n for rank in range(num_workers)\n ]\n\n progress_coroutine = loop.create_task(\n loss_progress(\n loss_queue, obj_count, stop_progress, frequency,\n lambda: server.queue_size(), model.model_progress_callback,\n mask, header\n )\n )\n\n runner = loop.create_task(\n runner_coroutine(\n model, optimizer, loss, workers, server, num_parsers, data_loader, uri_callbacks, stop_progress,\n loss_queue, update_callback_mode\n )\n )\n else:\n progress_coroutine = loop.create_task(\n loss_progress(\n loss_queue, obj_count, stop_progress, frequency,\n lambda: 0, model.model_progress_callback,\n mask, header\n )\n )\n\n runner = loop.create_task(\n single_thread_runner_coroutine(\n model, optimizer, loss, num_parsers, data_loader, uri_callbacks, stop_progress, loss_queue, obj_count\n )\n )\n\n # Run everything\n loop.run_until_complete(asyncio.wait([runner, progress_coroutine]))\n\n model.finish_hook()\n", "sub_path": "torch_ps/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 10956, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "asyncio.set_event_loop_policy", "line_number": 3, "usage_type": "call"}, {"api_name": "uvloop.EventLoopPolicy", "line_number": 3, "usage_type": "call"}, {"api_name": "psmodel.BaseParameterServerModule", "line_number": 24, "usage_type": "name"}, {"api_name": "optim.ParameterServerOptimizer", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "worker.Worker", "line_number": 27, "usage_type": "name"}, {"api_name": "server.Server", "line_number": 28, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 30, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 31, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 31, "usage_type": "name"}, {"api_name": "asyncio.Event", "line_number": 32, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}, {"api_name": "import_util.libtorch_embedding.set_current_thread_priority", "line_number": 42, "usage_type": "call"}, {"api_name": "import_util.libtorch_embedding", "line_number": 42, "usage_type": "name"}, {"api_name": "ps_config.Config.worker_mode", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 53, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode.TRAIN", "line_number": 53, "usage_type": "attribute"}, {"api_name": "ps_config.WorkerMode", "line_number": 53, "usage_type": "name"}, {"api_name": "asyncio.ensure_future", "line_number": 55, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 57, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 59, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 62, "usage_type": "call"}, {"api_name": "asyncio.FIRST_COMPLETED", "line_number": 62, 
"usage_type": "attribute"}, {"api_name": "ps_config.Config.worker_mode", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 64, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode.TRAIN", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ps_config.WorkerMode", "line_number": 64, "usage_type": "name"}, {"api_name": "server.schedule", "line_number": 69, "usage_type": "call"}, {"api_name": "ps_config.Config.predictions_queue.put", "line_number": 72, "usage_type": "call"}, {"api_name": "ps_config.Config.predictions_queue", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 72, "usage_type": "name"}, {"api_name": "asyncio.wait", "line_number": 79, "usage_type": "call"}, {"api_name": "server.join", "line_number": 81, "usage_type": "call"}, {"api_name": "server.shutdown", "line_number": 91, "usage_type": "call"}, {"api_name": "worker.shutdown", "line_number": 92, "usage_type": "call"}, {"api_name": "util.timeit_recorder.coro_timeit", "line_number": 21, "usage_type": "attribute"}, {"api_name": "util.timeit_recorder", "line_number": 21, "usage_type": "name"}, {"api_name": "util.coro_print_exception", "line_number": 22, "usage_type": "name"}, {"api_name": "psmodel.BaseParameterServerModule", "line_number": 99, "usage_type": "name"}, {"api_name": "optim.ParameterServerOptimizer", "line_number": 100, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 103, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 104, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 104, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 104, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 104, "usage_type": "name"}, {"api_name": "asyncio.Event", "line_number": 105, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 106, "usage_type": "name"}, {"api_name": "progress.ObjCountHandle", "line_number": 107, "usage_type": "name"}, {"api_name": "ps_config.Config.worker_mode", "line_number": 118, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 118, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode.TRAIN", "line_number": 118, "usage_type": "attribute"}, {"api_name": "ps_config.WorkerMode", "line_number": 118, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "ps_config.Config.worker_mode", "line_number": 126, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 126, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode.EVALUATE", "line_number": 126, "usage_type": "attribute"}, {"api_name": "ps_config.WorkerMode", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 127, "usage_type": "call"}, {"api_name": "ps_config.Config.predictions_queue.put", "line_number": 132, "usage_type": "call"}, {"api_name": "ps_config.Config.predictions_queue", "line_number": 132, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 132, "usage_type": "name"}, {"api_name": "util.timeit_recorder.coro_timeit", "line_number": 96, "usage_type": "attribute"}, {"api_name": "util.timeit_recorder", "line_number": 96, "usage_type": "name"}, {"api_name": "util.coro_print_exception", "line_number": 97, "usage_type": "name"}, {"api_name": "psmodel.BaseParameterServerModule", 
"line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "attribute"}, {"api_name": "optim.ParameterServerOptimizer", "line_number": 149, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 150, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 155, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 158, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 158, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 158, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode", "line_number": 159, "usage_type": "name"}, {"api_name": "ps_config.WorkerMode.TRAIN", "line_number": 159, "usage_type": "attribute"}, {"api_name": "ps_config.Config.worker_mode", "line_number": 192, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 192, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop_policy", "line_number": 195, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 196, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 197, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop", "line_number": 200, "usage_type": "call"}, {"api_name": "util.deepcopy_model", "line_number": 204, "usage_type": "call"}, {"api_name": "util.AioSharedReadExclusiveWriteMutex", "line_number": 207, "usage_type": "call"}, {"api_name": "progress.ObjCountHandle", "line_number": 209, "usage_type": "call"}, {"api_name": "asyncio.Event", "line_number": 210, "usage_type": "call"}, {"api_name": "util.BASIC_PROGRESS_MASK", "line_number": 214, "usage_type": "name"}, {"api_name": "server.Server", "line_number": 219, "usage_type": "call"}, {"api_name": "worker.Worker", "line_number": 222, "usage_type": "call"}, {"api_name": "ps_config.Config.predictions_queue", "line_number": 222, "usage_type": "attribute"}, {"api_name": "ps_config.Config", "line_number": 222, "usage_type": "name"}, {"api_name": "progress.loss_progress", "line_number": 227, "usage_type": "call"}, {"api_name": "server.queue_size", "line_number": 229, "usage_type": "call"}, {"api_name": "progress.loss_progress", "line_number": 242, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "321640238", "text": "#!/usr/bin/python\n\nimport sys, os;\nimport argparse;\nfrom os.path import expanduser;\nimport pandas as pd;\nimport math;\n\n__author__ = \"Jeetesh Mangwani\"\n\ndef main():\n parser = argparse.ArgumentParser(description=\"This script outputs realized & unrealized gains and losses, assest lots and adjusted cost basis\");\n parser.add_argument(\"-wh\", \"--wallethistory\", type=str, help=\"The input xlsx file cotaining your wallet history\", required=False, default=\"wallethistory-normalized.xlsx\");\n parser.add_argument(\"-r\", \"--rates\", type=str, help=\"The input xlsx file containing the rates of your assets\", required=False, default=\"asset-rates.xlsx\");\n parser.add_argument(\"-st\", \"--statement\", type=str, help=\"The output xlsx file containing your gains and losses and asset lots\", required=False, default=\"statement.xlsx\");\n parser.add_argument(\"-ul\", \"--unsoldlots\", type=str, help=\"The output xlsx file containing your unsold asset lots\", 
required=False, default=\"unsoldlots.xlsx\");\n parser.add_argument(\"-v\", \"--verbose\", help=\"Whether to output verbose output messages\", required=False, default=False);\n args = parser.parse_args();\n print(\"Input Wallet History file: \", args.wallethistory);\n print(\"Input Asset Rates file: \", args.rates);\n print(\"Output Gain-Loss statement file: \", args.statement);\n print(\"Output Unsold Lots file: \", args.unsoldlots);\n print(\"Verbosity of log messages: \", args.verbose);\n\n historyDfs = pd.read_excel(args.wallethistory, sheet_name=\"Sheet1\")\n ratesDfs = pd.read_excel(args.rates, sheet_name=\"Sheet1\")\n # print(historyDfs);\n historyDfs = historyDfs.sort_values(by=['dateTime']);\n historyDfs = historyDfs.set_index(i for i in range(len(historyDfs.index)));\n # print(historyDfs);\n\n #historyDfs.to_excel(\"debug.xlsx\")\n\n outputFormat = {\n 'dateTime': pd.Series([], dtype='str'),\n 'asset': pd.Series([], dtype='str'),\n 'type': pd.Series([], dtype='str'),\n 'amount': pd.Series([], dtype='float'),\n 'txnPricePerUnit': pd.Series([], dtype='float'),\n 'totalCost': pd.Series([], dtype='float'),\n 'currentPricePerUnit': pd.Series([], dtype='float'),\n\n # for BUYs\n 'unsoldAmount': pd.Series([], dtype='float'),\n 'soldAmount': pd.Series([], dtype='float'),\n 'unrealizedGain': pd.Series([], dtype='float'),\n 'unrealizedGainPercent': pd.Series([], dtype='float'),\n\n #for SELLs\n 'interestLot': pd.Series([], dtype='float'),\n 'realizedGain': pd.Series([], dtype='float'),\n 'realizedGainPercent': pd.Series([], dtype='float'),\n\n # details for BUYs\n 'soldInLots': pd.Series([], dtype='str'),\n 'sellingDates': pd.Series([], dtype='str'),\n\n # details for SELLs\n 'boughtInLots': pd.Series([], dtype='str'),\n 'buyingDates': pd.Series([], dtype='str'),\n 'lotGains': pd.Series([], dtype='str'),\n };\n\n statementDfs = pd.DataFrame(outputFormat);\n\n rates = {};\n scratchPad = {};\n runningLots = {};\n\n for index, row in ratesDfs.iterrows():\n rates[str(row['asset'])]=float(row['pricePerUnit'])\n\n # print(rates);\n\n for index, row in historyDfs.iterrows():\n txnDateTime = str(row['dateTime']);\n asset = str(row['asset']);\n type = str(row['type']);\n amount = float(row['amount']);\n txnPricePerUnit = float(row['pricePerUnit']);\n totalCost = float(row['totalCost']);\n\n if asset not in rates:\n raise Exception(\"Could not find rate for asset: \" + asset);\n currentPricePerUnit = rates[asset];\n\n if (type == \"BUY\"):\n runningRate = rates[asset];\n costPrice = amount * txnPricePerUnit\n unrealizedLotGain = (amount * runningRate) - costPrice;\n unrealizedLotGainPercent = 0 if math.isclose(0.0, costPrice) else unrealizedLotGain / costPrice * 100\n scratchPad[index] = {\n 'dateTime': txnDateTime,\n 'asset': asset,\n 'type': 'BUY',\n 'amount': amount,\n 'txnPricePerUnit': txnPricePerUnit,\n 'totalCost': totalCost,\n 'currentPricePerUnit': currentPricePerUnit,\n\n 'unsoldAmount': amount,\n 'soldAmount': 0.0,\n 'unrealizedGain': unrealizedLotGain,\n 'unrealizedGainPercent': unrealizedLotGainPercent,\n 'soldInLots': [],\n 'sellingDates': [],\n\n 'boughtInLots': [],\n 'buyingDates': [],\n 'interestLot': 0.0,\n 'lotGains': [],\n 'realizedGain': 0.0,\n 'realizedGainPercent': 0.0,\n };\n\n if asset not in runningLots:\n runningLots[asset] = [];\n\n runningLots[asset].append(index)\n elif (type == \"SELL\"):\n wantToSell = amount;\n boughtInLots = [];\n buyingDates = [];\n interestLot = 0.0;\n lotGains = [];\n realizedGain = 0.0;\n costBasis = 0.0;\n\n while(math.isclose(wantToSell, 0.0) 
== False):\n if not runningLots[asset]:\n print(\"Selling more than you bought! Classifying as interest: \" + str(row));\n selling = wantToSell;\n interestLot = wantToSell;\n #buyingDates.append('unknown');\n costPrice = 0.0;\n costBasis += selling * costPrice;\n sellingPrice = txnPricePerUnit;\n lotGain = selling * (sellingPrice - costPrice);\n lotGains.append(lotGain);\n realizedGain += lotGain;\n wantToSell -= selling;\n else:\n oldestLotIndex = runningLots[asset][0];\n oldestLot = scratchPad[oldestLotIndex];\n\n if(oldestLot['unsoldAmount'] < wantToSell\n or math.isclose(wantToSell, oldestLot['unsoldAmount'])):\n del runningLots[asset][0]\n selling = oldestLot['unsoldAmount'];\n oldestLot['soldAmount'] += selling\n if math.isclose(oldestLot['amount'], oldestLot['soldAmount']) == False:\n raise Exception(\"Sold everything but amounts don't match: \"\n + str(oldestLot) + str(row));\n oldestLot['unsoldAmount'] = 0.0;\n oldestLot['soldInLots'].append(selling);\n oldestLot['sellingDates'].append(txnDateTime);\n\n boughtInLots.append(selling);\n buyingDates.append(oldestLot['dateTime']);\n costPrice = oldestLot['txnPricePerUnit'];\n costBasis += selling * costPrice;\n sellingPrice = txnPricePerUnit;\n lotGain = selling * (sellingPrice - costPrice);\n lotGains.append(lotGain);\n realizedGain += lotGain;\n wantToSell -= selling;\n else:\n selling = wantToSell;\n oldestLot['soldAmount'] += selling\n oldestLot['unsoldAmount'] -= selling\n oldestLot['soldInLots'].append(selling);\n oldestLot['sellingDates'].append(txnDateTime);\n\n boughtInLots.append(selling);\n buyingDates.append(oldestLot['dateTime']);\n costPrice = oldestLot['txnPricePerUnit'];\n costBasis += selling * costPrice;\n sellingPrice = txnPricePerUnit;\n lotGain = selling * (sellingPrice - costPrice);\n lotGains.append(lotGain);\n realizedGain += lotGain;\n wantToSell -= selling;\n\n realizedGainPercent = 0 if math.isclose(costBasis, 0.0) else realizedGain / costBasis * 100;\n scratchPad[index] = {\n 'dateTime': txnDateTime,\n 'asset': asset,\n 'type': 'SELL',\n 'amount': amount,\n 'txnPricePerUnit': txnPricePerUnit,\n 'totalCost': totalCost,\n 'currentPricePerUnit': currentPricePerUnit,\n\n 'unsoldAmount': 0.0,\n 'soldAmount': 0.0,\n 'unrealizedGain': 0.0,\n 'unrealizedGainPercent': 0.0,\n 'soldInLots': [],\n 'sellingDates': [],\n\n 'boughtInLots': boughtInLots,\n 'buyingDates': buyingDates,\n 'interestLot': interestLot,\n 'lotGains': lotGains,\n 'realizedGain': realizedGain,\n 'realizedGainPercent': realizedGainPercent\n };\n else:\n raise Exception(\"Invalid row: \" + row);\n\n for key, txn in scratchPad.items():\n statementDfs = statementDfs.append({\n 'dateTime': txn['dateTime'],\n 'asset': txn['asset'],\n 'type': txn['type'],\n 'amount': txn['amount'],\n 'txnPricePerUnit': txn['txnPricePerUnit'],\n 'currentPricePerUnit': txn['currentPricePerUnit'],\n 'totalCost': txn['totalCost'],\n\n 'unsoldAmount': txn['unsoldAmount'],\n 'soldAmount': txn['soldAmount'],\n 'unrealizedGain': txn['unrealizedGain'],\n 'unrealizedGainPercent': txn['unrealizedGainPercent'],\n 'soldInLots': str(txn['soldInLots']),\n 'sellingDates': str(txn['sellingDates']),\n\n 'boughtInLots': str(txn['boughtInLots']),\n 'buyingDates': str(txn['buyingDates']),\n 'interestLot': txn['interestLot'],\n 'lotGains': str(txn['lotGains']),\n 'realizedGain': txn['realizedGain'],\n 'realizedGainPercent': txn['realizedGainPercent'],\n }, ignore_index=True);\n\n lotsDfs = pd.DataFrame(outputFormat);\n portfolioAdjustedCostBasis = 0.0\n portfolioUnrealizedGain = 0.0\n 
portfolioUnrealizedGainPercent = 0.0\n for asset, keys in runningLots.items():\n #print(asset);\n sheetName = asset;\n totalUnsoldAmount = 0.0;\n adjustedCostBasis = 0.0;\n totalUnrealizedGain = 0.0;\n totalUnrealizedGainPercent = 0.0;\n for key in keys:\n txn = scratchPad[key];\n totalUnsoldAmount += txn['unsoldAmount']\n adjustedCostBasis += txn['unsoldAmount'] * txn['txnPricePerUnit']\n totalUnrealizedGain += txn['unrealizedGain']\n #print(scratchPad[key]);\n lotsDfs = lotsDfs.append({\n 'dateTime': txn['dateTime'],\n 'asset': txn['asset'],\n 'type': txn['type'],\n 'amount': txn['amount'],\n 'txnPricePerUnit': txn['txnPricePerUnit'],\n 'currentPricePerUnit': txn['currentPricePerUnit'],\n 'totalCost': txn['totalCost'],\n\n 'unsoldAmount': txn['unsoldAmount'],\n 'soldAmount': txn['soldAmount'],\n 'unrealizedGain': txn['unrealizedGain'],\n 'unrealizedGainPercent': txn['unrealizedGainPercent'],\n 'soldInLots': str(txn['soldInLots']),\n 'sellingDates': str(txn['sellingDates']),\n\n 'boughtInLots': str(txn['boughtInLots']),\n 'buyingDates': str(txn['buyingDates']),\n 'interestLot': txn['interestLot'],\n 'lotGains': str(txn['lotGains']),\n 'realizedGain': txn['realizedGain'],\n 'realizedGainPercent': txn['realizedGainPercent'],\n }, ignore_index=True);\n totalUnrealizedGainPercent = 0.0 if math.isclose(adjustedCostBasis, 0.0) else totalUnrealizedGain / adjustedCostBasis * 100;\n portfolioAdjustedCostBasis += adjustedCostBasis;\n portfolioUnrealizedGain += totalUnrealizedGain\n lotsDfs = lotsDfs.append({\n 'asset': txn['asset'],\n 'currentPricePerUnit': txn['currentPricePerUnit'],\n 'adjustedCostBasis': float(adjustedCostBasis),\n\n 'unsoldAmount': totalUnsoldAmount,\n 'unrealizedGain': totalUnrealizedGain,\n 'unrealizedGainPercent': totalUnrealizedGainPercent,\n }, ignore_index=True);\n portfolioUnrealizedGainPercent = 0.0 if math.isclose(portfolioAdjustedCostBasis, 0.0) else portfolioUnrealizedGain / portfolioAdjustedCostBasis * 100\n lotsDfs = lotsDfs.append({\n 'asset': '*',\n 'adjustedCostBasis': float(portfolioAdjustedCostBasis),\n 'unrealizedGain': portfolioUnrealizedGain,\n 'unrealizedGainPercent': portfolioUnrealizedGainPercent,\n }, ignore_index=True);\n\n lotsDfs.to_excel(args.unsoldlots, 'sheet1');\n #print(outputDfs);\n statementDfs.to_excel(args.statement, \"sheet1\")\n\nmain();\n", "sub_path": "generate_wallet_statement.py", "file_name": "generate_wallet_statement.py", "file_ext": "py", "file_size_in_byte": 13067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 46, "usage_type": "call"}, {"api_name": 
"pandas.Series", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 91, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 129, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 147, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 151, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 236, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 276, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 288, "usage_type": "call"}]} +{"seq_id": "428883883", "text": "import torch\nfrom torch import nn, optim\nimport sys\nimport numpy as np\nfrom utils import load_dataset, RunningAverage\nimport argparse\nfrom torch.utils.data import DataLoader\nimport transformers as ppb\nimport utils\nimport os\nimport logging\nimport pdb\nfrom collections import Counter\nfrom string import punctuation\n\nsys.path.insert(0, './database/features')\nfrom datavec1 import X1_num\nfrom addresses1 import X1_str\nfrom labels1 import y1\nfrom main_bert import AddressDataset, get_dataloaders\n\nfrom sklearn.metrics import f1_score, recall_score, precision_score\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass LstmModel(nn.Module):\n def __init__(self, args):\n super(LstmModel, self).__init__()\n self.lstm_size = 128\n self.embedding_dim = 128 # v3\n self.num_layers = 3\n\n len_dataset = len(X1_num)\n self.embedding = nn.Embedding(\n num_embeddings=len_dataset,\n embedding_dim=self.embedding_dim,\n )\n self.lstm = nn.LSTM(\n input_size=self.embedding_dim,\n hidden_size=self.lstm_size,\n num_layers=self.num_layers,\n dropout=0.2,\n )\n self.fc = nn.Linear(self.lstm_size, 2)\n\n def forward(self, x, x_int, prev_state):\n # v3: only NN embeddings\n embed = self.embedding(x_int).mean(dim=2)\n output, state = self.lstm(embed.float(), prev_state)\n logits = self.fc(output)\n return logits, state\n\n def init_state(self, sequence_length):\n return (torch.zeros(self.num_layers, sequence_length, self.lstm_size),\n torch.zeros(self.num_layers, sequence_length, self.lstm_size))\n\n\nclass AddressDataset2(torch.utils.data.Dataset):\n def __init__(self, args, mode='train'):\n self.args = args\n self.mode = mode\n for i, address in enumerate(X1_str):\n X1_str[i] = ''.join([c for c in address if c not in punctuation])\n\n all_text2 = ' '.join(X1_str)\n words = all_text2.split()\n count_words = Counter(words)\n total_words = len(words)\n sorted_words = count_words.most_common(total_words)\n vocab_to_int = {w: i for i, (w, c) in enumerate(sorted_words)}\n X1_int = []\n for address in X1_str:\n encoded = [vocab_to_int[w] for w in address.split()]\n X1_int.append(encoded)\n\n X1_int = self.pad_features(X1_int, 40)\n val_split = round(len(X1_int) * 0.2)\n 
self.X_train = torch.tensor(X1_int[:-val_split])\n self.y_train = torch.tensor(y1[:-val_split])\n self.X_val = torch.tensor(X1_int[-val_split:])\n self.y_val = torch.tensor(y1[-val_split:])\n\n def pad_features(self, reviews_int, seq_length):\n ''' Return features of address_ints, where each address is padded with 0's or truncated to the input seq_length.\n '''\n features = np.zeros((len(reviews_int), seq_length), dtype=int)\n\n for i, review in enumerate(reviews_int):\n review_len = len(review)\n\n if review_len <= seq_length:\n zeroes = list(np.zeros(seq_length - review_len))\n new = zeroes + review\n elif review_len > seq_length:\n new = review[0:seq_length]\n\n features[i, :] = np.array(new)\n return features\n\n def __len__(self):\n if self.mode == 'train':\n return len(self.X_train) - self.args.sequence_length - 1\n else:\n return len(self.X_val) - self.args.sequence_length - 1\n\n def __getitem__(self, index):\n if self.mode == 'train':\n return self.X_train[index:index + self.args.sequence_length], self.y_train[index:index + self.args.sequence_length]\n elif self.mode == 'val':\n return self.X_val[index:index + self.args.sequence_length], self.y_val[index:index + self.args.sequence_length]\n\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, args, mode='train'):\n self.args = args\n self.mode = mode\n self.X_train, self.y_train, self.X_val, self.y_val = load_dataset(\n X1_num, y1, self.args.num_features)\n print(self.X_train.shape, self.y_train.shape,\n self.X_val.shape, self.y_val.shape)\n\n def __len__(self):\n if self.mode == 'train':\n return self.X_train.shape[0] - self.args.sequence_length - 1\n else:\n return self.X_val.shape[0] - self.args.sequence_length - 1\n\n def __getitem__(self, index):\n if self.mode == 'train':\n return torch.from_numpy(self.X_train[index:index + self.args.sequence_length]), torch.from_numpy(self.y_train[index:index + self.args.sequence_length])\n elif self.mode == 'val':\n return torch.from_numpy(self.X_val[index:index + self.args.sequence_length]), torch.from_numpy(self.y_val[index:index + self.args.sequence_length])\n\n\ndef accuracy(y_pred, y_test):\n y_pred_softmax = torch.log_softmax(y_pred, dim=1)\n _, y_pred_tags = torch.max(y_pred_softmax, dim=1)\n\n correct_pred = (y_pred_tags == y_test).float()\n acc = correct_pred.sum() / torch.numel(correct_pred)\n\n return acc.item()\n\n\ndef other_metrics(y_pred, y_test):\n y_pred_softmax = torch.log_softmax(y_pred, dim=1)\n _, y_pred_tags = torch.max(y_pred_softmax, dim=1)\n\n f1 = f1_score(y_test[:, 0].cpu().numpy(), y_pred_tags[:, 0].cpu().numpy())\n recall = recall_score(y_test[:, 0].cpu().numpy(),\n y_pred_tags[:, 0].cpu().numpy())\n precision = precision_score(\n y_test[:, 0].cpu().numpy(), y_pred_tags[:, 0].cpu().numpy())\n\n return f1, recall, precision\n\n\ndef train(dataset, dataset2, model, args, mode):\n model.train()\n spt_loader = DataLoader(dataset, batch_size=args.batch_size)\n loader = DataLoader(dataset2, batch_size=args.batch_size)\n dataloader_iter = iter(loader)\n spt_dataloader_iter = iter(spt_loader)\n state_h, state_c = model.init_state(args.sequence_length)\n batch = 0\n total_loss = 0\n total_acc = 0\n while True:\n try:\n X, y = next(spt_dataloader_iter)\n X_str, y2 = next(dataloader_iter)\n except RuntimeError:\n continue\n except StopIteration:\n break\n\n optimizer.zero_grad()\n y_pred, (state_h, state_c) = model(X.to(device), X_str.to(device),\n (state_h.to(device), state_c.to(device)))\n loss = criterion(y_pred.transpose(1, 2), y.long().to(device))\n 
total_loss += loss.item()\n\n acc = accuracy(y_pred.transpose(1, 2), y.long().to(device))\n total_acc += acc\n\n state_h = state_h.detach()\n state_c = state_c.detach()\n\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n logging.info({'epoch': epoch, 'batch': batch,\n 'train_loss': '{:05.4f}'.format(loss.item())})\n batch += 1\n\n logging.info({'epoch': epoch, 'train_loss': '{:05.4f}'.format(\n total_loss / batch), 'accuracy': '{:05.3f}'.format(total_acc / batch)})\n\n\ndef val(dataset, dataset2, model, args, mode):\n model.eval()\n spt_loader = DataLoader(dataset, batch_size=args.batch_size)\n loader = DataLoader(dataset2, batch_size=args.batch_size)\n dataloader_iter = iter(loader)\n spt_dataloader_iter = iter(spt_loader)\n state_h, state_c = model.init_state(args.sequence_length)\n total_loss = 0\n total_acc = 0\n total_f1 = 0\n total_recall = 0\n total_precision = 0\n batch = 0\n while True:\n try:\n X, y = next(spt_dataloader_iter)\n X_str, y2 = next(dataloader_iter)\n except RuntimeError:\n continue\n except StopIteration:\n break\n\n y_pred, (state_h, state_c) = model(X.to(device), X_str.to(device),\n (state_h.to(device), state_c.to(device)))\n loss = criterion(y_pred.transpose(\n 1, 2), y.long().to(device))\n total_loss += loss.item()\n\n acc = accuracy(y_pred.transpose(1, 2), y.long().to(device))\n f1, recall, precision = other_metrics(\n y_pred.transpose(1, 2), y.long().to(device))\n\n total_acc += acc\n total_f1 += f1\n total_recall += recall\n total_precision += precision\n\n batch += 1\n\n logging.info({'epoch': epoch, 'val_loss': '{:05.4f}'.format(\n total_loss / batch), 'accuracy': '{:05.3f}'.format(total_acc / batch)})\n logging.info({'f1-score': '{:05.3f}'.format(total_f1 / batch), 'recall': '{:05.3f}'.format(\n total_recall / batch), 'precision': '{:05.3f}'.format(total_precision / batch)})\n\n return total_acc / batch\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--max-epochs', type=int, default=10)\nparser.add_argument('--batch-size', type=int, default=16)\nparser.add_argument('--sequence-length', type=int, default=4)\nparser.add_argument('--num-features', type=int, default=8)\nparser.add_argument('--model_dir', default='experiments/base_model')\nparser.add_argument('--restore_file', default='best',\n help=\"Optional, file name from which reload weights before training e.g. 
'best' or 'last' or 'None'\")\nparser.add_argument('--patience', type=int, default=5,\n help=\"Epochs for early stopping\")\nparser.add_argument('--seed', type=int, default=100,\n help='Seed for randomization')\nparser.add_argument('--val_split', type=float, default=0.2,\n help='Size of validation set')\nparser.add_argument('--shuffle', action='store_true',\n help='Flag for shuffling dataset')\nargs = parser.parse_args()\n\nutils.set_logger(os.path.join(args.model_dir, 'train.log'))\nlogging.info(args)\n\ndataset_len = 0\ntrain_set = Dataset(args, 'train')\nval_set = Dataset(args, 'val')\ntrain_set2 = AddressDataset2(args, 'train')\nval_set2 = AddressDataset2(args, 'val')\n\nmodel = LstmModel(args).to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=0.001)\n\nmetrics = {\n 'accuracy': accuracy,\n # add more metrics if required for each token type\n}\n\npatience = 5\nbest_val_acc = 0.0\n\nif args.restore_file is not None:\n restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')\n logging.info('Restoring parameters from {}'.format(restore_path))\n utils.load_checkpoint(restore_path, model, optimizer)\n\n filepath = args.model_dir + 'val_best_weights.json'\n if os.path.exists(filepath):\n f = open(filepath)\n data = json.load(f)\n best_val_acc = data['accuracy']\n f.close()\n\nfor epoch in range(args.max_epochs):\n train(train_set, train_set2, model, args, 'train')\n val_acc = val(val_set, val_set2, model, args, 'val')\n val_metrics = {'accuracy': val_acc}\n is_best = val_acc >= best_val_acc\n\n utils.save_checkpoint({'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optim_dict': optimizer.state_dict()}, is_best=is_best, checkpoint=args.model_dir)\n\n if is_best:\n logging.info('- Found new best accuracy')\n counter = 0 # reset counter\n best_val_acc = val_acc\n\n best_json_path = os.path.join(\n args.model_dir, 'val_best_weights.json')\n utils.save_dict_to_json(val_metrics, best_json_path)\n else:\n counter += 1\n\n if counter > patience:\n logging.info('- No improvement in a while, stopping training...')\n last_json_path = os.path.join(\n args.model_dir, 'val_last_weights.json')\n utils.save_dict_to_json(val_metrics, last_json_path)\n", "sub_path": "main_lstm_v3.py", "file_name": "main_lstm_v3.py", "file_ext": "py", "file_size_in_byte": 11636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.path.insert", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "datavec1.X1_num", "line_number": 34, "usage_type": "argument"}, {"api_name": "torch.nn.Embedding", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 55, "usage_type": 
"call"}, {"api_name": "torch.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 59, "usage_type": "attribute"}, {"api_name": "addresses1.X1_str", "line_number": 63, "usage_type": "argument"}, {"api_name": "addresses1.X1_str", "line_number": 64, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 64, "usage_type": "name"}, {"api_name": "addresses1.X1_str", "line_number": 66, "usage_type": "argument"}, {"api_name": "collections.Counter", "line_number": 68, "usage_type": "call"}, {"api_name": "addresses1.X1_str", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 80, "usage_type": "call"}, {"api_name": "labels1.y1", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "labels1.y1", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 114, "usage_type": "attribute"}, {"api_name": "utils.load_dataset", "line_number": 118, "usage_type": "call"}, {"api_name": "datavec1.X1_num", "line_number": 119, "usage_type": "argument"}, {"api_name": "labels1.y1", "line_number": 119, "usage_type": "argument"}, {"api_name": "torch.from_numpy", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.log_softmax", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.numel", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.log_softmax", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 148, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 162, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 194, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 205, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 241, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 243, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 249, "usage_type": "call"}, {"api_name": "utils.set_logger", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 278, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 279, 
"usage_type": "call"}, {"api_name": "torch.optim", "line_number": 279, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 291, "usage_type": "call"}, {"api_name": "utils.load_checkpoint", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "utils.save_checkpoint", "line_number": 307, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 316, "usage_type": "call"}, {"api_name": "os.path", "line_number": 316, "usage_type": "attribute"}, {"api_name": "utils.save_dict_to_json", "line_number": 318, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "utils.save_dict_to_json", "line_number": 326, "usage_type": "call"}]} +{"seq_id": "439889572", "text": "import time\nfrom utils_local import setting_parameters, loading_plus_preprocessing_data_with_labels, \\\n train_model_with_labels, test_model_with_labels, save_loss_info_into_a_file, create_folder_if_needed\nimport torch\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision.utils import save_image\nimport os\nfrom models.predict_body_angle_model import Predict_body_angle\n\n\ndef main():\n # ====== setting the hp parameters =====\n folder_dir, args = setting_parameters()\n tensorboard_writer = SummaryWriter(folder_dir)\n # ===== set a seed for the run ======\n torch.manual_seed(args['seed'])\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n # ====== load dataset ======\n dataloader_dict = loading_plus_preprocessing_data_with_labels(args, split_to_train_val=True)\n # ===== if args['save_latent_space'] is True create a dataloader of all of the data =======\n if args['save_latent_space']:\n dataloader_dict['all_data'] = loading_plus_preprocessing_data_with_labels(args)\n\n # ====== visualize validation data =======\n img_to_plot = dataloader_dict[1].dataset.dataset.tensors[1][:16]\n save_images_path = os.path.join(folder_dir, 'Images')\n create_folder_if_needed(save_images_path)\n save_image(img_to_plot, os.path.join(save_images_path, 'row val data.png'))\n\n # ====== initializing the model, the loss and the optimizer function =======\n model = Predict_body_angle(args['latent_space_dim']).to(args['device'])\n if args['load_checkpoint']:\n checkpoint = torch.load(os.path.join(args['checkpoint_path'], args['checkpoint_to_load']))\n model.load_state_dict(checkpoint['model_state_dict'])\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=args['lr'])\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15)\n # ===== train the model ======\n save_model_dir = os.path.join(folder_dir, 'model check points')\n create_folder_if_needed(save_model_dir)\n for epoch in range(args['epochs']):\n start_epoch = time.time()\n checkpoint_latent_space_interval = args['checkpoint_latent_space_interval'] if args['save_latent_space'] else None\n train_loss = train_model_with_labels(model, dataloader_dict, optimizer, criterion, epoch,\n args['save_latent_space'], folder_dir, 
checkpoint_latent_space_interval)\n if (epoch % args['val_check_interval']) == 0:\n val_loss = test_model_with_labels(model, dataloader_dict[1], criterion, epoch, None)\n scheduler.step()\n # ====== write to tensorboard =======\n train_loss_avr = train_loss / len(dataloader_dict[0])\n val_loss_avr = val_loss / len(dataloader_dict[1])\n tensorboard_writer.add_scalars('train/val loss',\n {'train_loss': train_loss_avr, 'val loss': val_loss_avr},\n epoch)\n save_loss_info_into_a_file(train_loss_avr, val_loss_avr, folder_dir, epoch, scheduler.get_lr()[0])\n end_epoch = time.time()\n # ====== print status in console =======\n print('{} Epoch: Train Loss {:.5f}, Validation loss {:.5f} time {:.2f}, lr {:.8f}'\n .format(epoch, train_loss_avr, val_loss_avr, end_epoch - start_epoch,\n scheduler.get_lr()[0]))\n if args['save_model_checkpoints']:\n if (epoch % args['checkpoint_interval']) == 0:\n model_dict = {'model_state_dict': model.state_dict(), 'model_name': model._get_name()}\n torch.save(model_dict, os.path.join(save_model_dir, 'model_{}_epoch.pth.tar'.format(epoch)))\n model_dict = {'model_state_dict': model.state_dict(), 'model_name': model._get_name()}\n torch.save(model_dict, os.path.join(save_model_dir, 'model_{}_epoch.pth.tar'.format(epoch)))\n tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Predict_body_angle/train predict body angle.py", "file_name": "train predict body angle.py", "file_ext": "py", "file_size_in_byte": 4051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "utils_local.setting_parameters", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 19, "usage_type": "attribute"}, {"api_name": "utils_local.loading_plus_preprocessing_data_with_labels", "line_number": 21, "usage_type": "call"}, {"api_name": "utils_local.loading_plus_preprocessing_data_with_labels", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "utils_local.create_folder_if_needed", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.predict_body_angle_model.Predict_body_angle", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "utils_local.create_folder_if_needed", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "utils_local.train_model_with_labels", "line_number": 46, "usage_type": "call"}, {"api_name": "utils_local.test_model_with_labels", "line_number": 49, "usage_type": "call"}, {"api_name": "utils_local.save_loss_info_into_a_file", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "125661320", "text": "import numpy as np\r\nfrom scipy.optimize.optimize import approx_fprime\r\n\r\n\"\"\"\r\nImplementation of function objects.\r\nFunction objects encapsulate the behaviour of an objective function that we optimize.\r\nSimply put, implement evaluate(w, X, y) to get the numerical values corresponding to:\r\nf, the function value (scalar) and\r\ng, the gradient (vector).\r\n\r\nFunction objects are used with optimizers to navigate the parameter space and\r\nto find the optimal parameters (vector). See optimizers.py.\r\n\"\"\"\r\n\r\ndef log_1_plus_exp_safe(x):\r\n out = np.log(1+np.exp(x))\r\n out[x > 100] = x[x>100]\r\n out[x < -100] = np.exp(x[x < -100])\r\n return out\r\n\r\nclass FunObj:\r\n \"\"\"\r\n Function object for encapsulating evaluations of functions and gradients\r\n \"\"\"\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function AND its gradient w.r.t. w.\r\n Returns the numerical values based on the input.\r\n IMPORTANT: w is assumed to be a 1d-array, hence shaping will have to be handled.\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n def check_correctness(self, w, X, y):\r\n n, d = X.shape\r\n estimated_gradient = approx_fprime(w, lambda w: self.evaluate(w, X, y)[0], epsilon=1e-6)\r\n _, implemented_gradient = self.evaluate(w, X, y)\r\n difference = estimated_gradient - implemented_gradient\r\n if np.max(np.abs(difference) > 1e-3):\r\n print('User and numerical derivatives differ: %s vs. 
%s' % (estimated_gradient, implemented_gradient))\r\n else:\r\n print('User and numerical derivatives agree.')\r\n\r\nclass FunObjLeastSquares(FunObj):\r\n \r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of least squares objective.\r\n Least squares objective is the sum of squared residuals.\r\n \"\"\"\r\n\r\n # Prediction is linear combination\r\n y_hat = X@w\r\n # Residual is difference between prediction and ground truth\r\n residuals = y_hat - y\r\n # Squared residuals gives us the objective function value\r\n f = 0.5 * np.sum(residuals ** 2)\r\n # Analytical gradient, written in mathematical form first\r\n # and then translated into Python\r\n g = X.T@X@w - X.T@y\r\n return f, g\r\n\r\nclass FunObjLeastSquaresL2(FunObj):\r\n \r\n def __init__(self, lammy):\r\n self.lammy = lammy\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of least squares objective.\r\n Least squares objective is the sum of squared residuals.\r\n \"\"\"\r\n n, d = X.shape\r\n\r\n # Prediction is linear combination\r\n y_hat = X@w\r\n # Residual is difference between prediction and ground truth\r\n residuals = y_hat - y\r\n # Squared residuals gives us the objective function value\r\n f = 0.5 * np.sum(residuals ** 2) + 0.5 * self.lammy * np.sum(w ** 2)\r\n # Analytical gradient, written in mathematical form first\r\n # and then translated into Python\r\n g = (X.T@X + self.lammy * np.eye(d)) @ w - X.T@y\r\n return f, g\r\n\r\nclass FunObjRobustRegression(FunObj):\r\n \r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of ROBUST least squares objective.\r\n \"\"\"\r\n\r\n n, d = X.shape\r\n\r\n # Calculate the function value\r\n f = 0\r\n for i in range(n):\r\n # Tip: when you have two terms, it's useful to call them \"left\" and \"right\".\r\n # Believe or not, having two terms show up in your functions is extremely common.\r\n left = np.exp(w@X[i,:] - y[i])\r\n right = np.exp(y[i] - w@X[i,:])\r\n f += np.log(left + right)\r\n\r\n # Calculate the gradient value\r\n r = np.zeros(n)\r\n for i in range(n):\r\n left = np.exp(w@X[i,:] - y[i])\r\n right = np.exp(y[i] - w@X[i,:])\r\n r[i] = (left - right) / (left + right)\r\n g = X.T@r\r\n\r\n return f, g\r\n\r\nclass FunObjLogReg(FunObj):\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of logistics regression objective.\r\n \"\"\" \r\n Xw = X @ w\r\n yXw = y * Xw # element-wise multiply\r\n yXw = np.clip(yXw, -100, 100) # safeguarding\r\n\r\n # Calculate the function value\r\n # f = np.sum(np.log(1. + np.exp(-yXw)))\r\n f = np.sum(log_1_plus_exp_safe(-yXw))\r\n\r\n # Calculate the gradient value\r\n res = - y / (1. + np.exp(yXw))\r\n g = X.T @ res\r\n \r\n return f, g\r\n\r\nclass FunObjLogRegL2(FunObj):\r\n\r\n def __init__(self, lammy):\r\n self.lammy = lammy\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of L2-regularized logistics regression objective.\r\n \"\"\" \r\n Xw = X @ w\r\n yXw = y * Xw # element-wise multiply\r\n \r\n # Calculate the function value\r\n f = np.sum(log_1_plus_exp_safe(-yXw)) + 0.5 * self.lammy * np.sum(w ** 2)\r\n \r\n # Calculate the gradient value\r\n res = - y / (1. 
+ np.exp(yXw))\r\n g = X.T @ res + self.lammy * w\r\n \r\n return f, g\r\n\r\nclass FunObjLogRegL2Kernel(FunObj):\r\n\r\n def __init__(self, lammy):\r\n self.lammy = lammy\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function and gradient of L2-regularized logistics regression objective.\r\n \"\"\" \r\n Xw = X @ w\r\n yXw = y * Xw # element-wise multiply\r\n \r\n # Calculate the function value\r\n f = np.sum(log_1_plus_exp_safe(-yXw)) + 0.5 * self.lammy * w.T @ X @ w\r\n \r\n # Calculate the gradient value\r\n res = - y / (1. + np.exp(yXw))\r\n g = X.T @ res + self.lammy * X @ w\r\n \r\n return f, g\r\n\r\n\r\nclass FunObjLogRegL0(FunObj):\r\n\r\n def __init__(self, lammy):\r\n self.lammy = lammy\r\n\r\n def evaluate(self, w, X, y):\r\n \"\"\"\r\n Evaluates the function value of of L0-regularized logistics regression objective.\r\n \"\"\" \r\n Xw = X @ w\r\n yXw = y * Xw # element-wise multiply\r\n\r\n # Calculate the function value\r\n f = np.sum(np.log(1. + np.exp(-yXw))) + self.lammy * len(w)\r\n \r\n # We cannot differentiate the \"length\" function\r\n g = None\r\n return f, g\r\n\r\nclass FunObjSoftmax(FunObj):\r\n\r\n def evaluate(self, w, X, y):\r\n n, d = X.shape\r\n k = len(np.unique(y))\r\n\r\n W = w.reshape(k, d)\r\n G = np.zeros([k, d])\r\n \r\n # Precompute dot products\r\n XW = X @ W.T # n-by-k matrix, XW[i, c] is the dot product between example i and class c weights\r\n exp_XW = np.exp(XW) # n-by-k matrix, exp_XW[i, c] is the exponential of dot product between example i and class c weights\r\n sum_exp_XW = np.sum(exp_XW, axis=1) # n-by-1 vector, sum_exp_XW[i] is the sum of exponentials of dot products between example i and each class's weights\r\n log_sum_exp_XW = np.log(sum_exp_XW) # self-explanatory\r\n \r\n # Precompute p\r\n p = np.zeros([k, n]) # p[c, i] is the softmax probability p(y_i = c | W, x_i).\r\n for c in range(k):\r\n for i in range(n):\r\n p[c, i] = exp_XW[i, c] / sum_exp_XW[i]\r\n\r\n # Compute f value\r\n f = 0\r\n for i in range(n):\r\n left = -XW[i, y[i]]\r\n right = log_sum_exp_XW[i]\r\n f += left + right\r\n\r\n # Compute gradient. 
More vectorized the better\r\n for c in range(k):\r\n for j in range(d):\r\n left = X[:, j]\r\n right = p[c, :] - (y == c)\r\n G[c, j] = np.sum(left * right)\r\n g = G.reshape(-1)\r\n return f, g", "sub_path": "5/code/fun_obj.py", "file_name": "fun_obj.py", "file_ext": "py", "file_size_in_byte": 7719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.log", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.optimize.optimize.approx_fprime", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "395984866", "text": "\"\"\"\nAPI for controlling OSRM backend servers.\n\"\"\"\nimport re\n\nimport requests\nimport requests.adapters\nfrom flask import Flask, request\nfrom urllib3 import Retry\nfrom werkzeug.routing import UnicodeConverter, ValidationError, Rule\n\nfrom osrm.osrmcontroller import OsrmController\nfrom osrm.tasks import (\n extract as extract_task,\n contract as contract_task,\n restart as restart_task,\n revoke_all_scheduled_tasks_for_osrm_worker,\n)\n\nAPI_NAME = \"OSRM Manager\"\nOSRM_CONVERTER_NAME = \"osrm\"\n\n\ndef retrying_requests() -> requests.Session:\n \"\"\"Create a Requests session that retries it's requests for about 10 s.\"\"\"\n session = requests.Session()\n retries = Retry(total=5, backoff_factor=0.3) # Max 9.3 seconds of for all retries\n session.mount(\"http://\", requests.adapters.HTTPAdapter(max_retries=retries))\n return 
session\n\n\ndef format_docstrings(docstring: bytes) -> str:\n \"\"\"Fix the formatting of docstrings for a nicer display in API documentation.\"\"\"\n docstring = str(docstring).replace(\"\\n\", \"\")\n docstring = re.sub(r\" +\", \" \", docstring)\n docstring = re.sub(\"^ \", \"\", docstring)\n docstring = re.sub(\" $\", \"\", docstring)\n return docstring\n\n\ndef api_factory(osrm_controller: OsrmController) -> Flask:\n \"\"\"\n Build a Flask API app for the specified OSRM controller instance.\n \"\"\"\n # pylint: disable=unused-variable\n # ^ flask routes are detected as unused\n\n app = Flask(__name__, static_folder=None)\n app.config[\"MAX_CONTENT_LENGTH\"] = 100 * 1024 * 1024 # Limit uploads to 100 MB\n\n class OsrmServerNameConverter(UnicodeConverter):\n \"\"\"\n Used to validate OSRM server names\n \"\"\"\n\n def to_python(self, value):\n if value not in osrm_controller.server_names:\n raise ValidationError()\n return super().to_python(value)\n\n app.url_map.converters[OSRM_CONVERTER_NAME] = OsrmServerNameConverter\n\n @app.route(\"/\", methods=[\"GET\"])\n def index():\n \"\"\"\n Return the list of available URLs\n \"\"\"\n url_docs = {}\n\n for rule in app.url_map.iter_rules():\n rule: Rule\n doc = app.view_functions[rule.endpoint].__doc__\n if any(\n [\n isinstance(converter, OsrmServerNameConverter)\n for converter in rule._converters.values() # pylint: disable=protected-access\n ]\n ):\n doc += \"; Valid OSRM server names: \" + \", \".join(\n osrm_controller.server_names\n )\n\n url_docs[str(rule)] = {\n \"allowed_methods\": list(rule.methods),\n \"doc\": format_docstrings(doc),\n }\n\n return url_docs\n\n @app.route(\"/status\", methods=[\"GET\"])\n def status():\n \"\"\"Status of OSRM servers\"\"\"\n # need to load controller from Redis as server process IDs could have changed\n # if OSRM server was restarted since creation of API\n recent_osrm_controller = OsrmController.get_controller_from_redis()\n return recent_osrm_controller.status()\n\n @app.route(\n f\"/osrm/<{OSRM_CONVERTER_NAME}:server_name>/\", methods=[\"GET\"]\n )\n def osrm_proxy(server_name: str, osrm_path: str):\n \"\"\"\n Proxy the request to the selected OSRM backend server.\n See ProjectOSRM backend HTTP API documentation for details\n (https://github.com/Project-OSRM/osrm-backend/blob/master/docs/http.md)\n \"\"\"\n osrm_server_id = osrm_controller.get_server_id(server_name)\n port = osrm_controller.port_bindings[osrm_server_id]\n\n response = retrying_requests().get(\n f\"http://127.0.0.1:{port}/{osrm_path}?{request.query_string.decode()}\",\n stream=True,\n )\n return response.raw.read(), response.status_code, response.headers.items()\n\n @app.route(\n f\"/control/<{OSRM_CONVERTER_NAME}:server_name>/restart\", methods=[\"POST\"]\n )\n def restart(server_name: str):\n \"\"\"Restart the selected OSRM server.\"\"\"\n restart_task.apply_async(args=(server_name,), queue=f\"osrm_{server_name}_queue\")\n return {\"success\": True}\n\n @app.route(\n f\"/control/<{OSRM_CONVERTER_NAME}:server_name>/extract-data\", methods=[\"POST\"]\n )\n def extract(server_name: str):\n \"\"\"\n Force an extraction of OSM data. You will almost always\n want to run 'contract-data' after this operation.\n \"\"\"\n extract_task.apply_async(args=(server_name,), queue=f\"osrm_{server_name}_queue\")\n return {\"success\": True}\n\n @app.route(\n f\"/control/<{OSRM_CONVERTER_NAME}:server_name>/contract-data\", methods=[\"POST\"]\n )\n def contract_data(server_name: str):\n \"\"\"\n Force OSRM data contraction. 
An additional CSV file with traffic\n data can be posted in the request body.\n\n For CSV file format see:\n https://github.com/Project-OSRM/osrm-backend/wiki/Traffic\n \"\"\"\n osrm_server_id = osrm_controller.get_server_id(server_name)\n revoke_all_scheduled_tasks_for_osrm_worker(osrm_server_id)\n\n if not request.files:\n contract_task.apply_async(\n args=(server_name,), queue=f\"osrm_{server_name}_queue\"\n )\n return {\"success\": True, \"withTraffic\": False}\n\n if len(request.files) != 1 or not next(\n request.files.values()\n ).filename.endswith(\".csv\"):\n return {\n \"success\": False,\n \"message\": f\"At most one .csv file must be provided\",\n }\n\n contract_task.apply_async(\n args=(server_name, next(request.files.values()).read().decode()),\n queue=f\"osrm_{server_name}_queue\",\n )\n\n update_without_traffic_chain = contract_task.signature(\n countdown=600, args=(server_name,), queue=f\"osrm_{server_name}_queue\"\n ) | restart_task.signature(\n args=(server_name,), queue=f\"osrm_{server_name}_queue\", immutable=True\n )\n\n update_without_traffic_chain.delay()\n\n return {\"success\": True, \"withTraffic\": True}\n\n return app\n", "sub_path": "osrm/osrmapi.py", "file_name": "osrmapi.py", "file_ext": "py", "file_size_in_byte": 6175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requests.Session", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib3.Retry", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.adapters.HTTPAdapter", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.adapters", "line_number": 28, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 24, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 35, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 37, "usage_type": "call"}, {"api_name": "osrm.osrmcontroller.OsrmController", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 48, "usage_type": "call"}, {"api_name": "werkzeug.routing.UnicodeConverter", "line_number": 51, "usage_type": "name"}, {"api_name": "werkzeug.routing.ValidationError", "line_number": 58, "usage_type": "call"}, {"api_name": "werkzeug.routing.Rule", "line_number": 71, "usage_type": "name"}, {"api_name": "osrm.osrmcontroller.OsrmController.get_controller_from_redis", "line_number": 95, "usage_type": "call"}, {"api_name": "osrm.osrmcontroller.OsrmController", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.query_string.decode", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.query_string", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "osrm.tasks.restart.apply_async", "line_number": 121, "usage_type": "call"}, {"api_name": "osrm.tasks.restart", "line_number": 121, "usage_type": "name"}, {"api_name": "osrm.tasks.extract.apply_async", "line_number": 132, "usage_type": "call"}, {"api_name": "osrm.tasks.extract", "line_number": 132, "usage_type": "name"}, {"api_name": "osrm.tasks.revoke_all_scheduled_tasks_for_osrm_worker", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "osrm.tasks.contract.apply_async", "line_number": 150, "usage_type": 
"call"}, {"api_name": "osrm.tasks.contract", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.files.values", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "osrm.tasks.contract.apply_async", "line_number": 163, "usage_type": "call"}, {"api_name": "osrm.tasks.contract", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.request.files.values", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 164, "usage_type": "name"}, {"api_name": "osrm.tasks.contract.signature", "line_number": 168, "usage_type": "call"}, {"api_name": "osrm.tasks.contract", "line_number": 168, "usage_type": "name"}, {"api_name": "osrm.tasks.restart.signature", "line_number": 170, "usage_type": "call"}, {"api_name": "osrm.tasks.restart", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "158912621", "text": "from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom .models import Park, Ride, Parkhours, RideWaits, Weather, UserRideFavorite, UserNotifications, UserPhonenumber\nimport json\nfrom .generic_functions import fix_time\nfrom datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nimport pymysql\nimport pandas as pd\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .forms import NotificationForm\nfrom .forms import PhonenumberForm\nimport requests\nimport config\n# Create your views here.\n\ndef terms_of_use(request):\n return render(request, 'terms_of_use.html')\n\ndef parks_list(request):\n park_list = Park.objects.all()\n context = {'park_list': park_list}\n return render(request, 'parks_list.html', context)\n\ndef park_detail(request, park_id):\n park = get_object_or_404(Park,pk = park_id)\n ride_list = park.ride_set.filter(haswaits = 1)\n\n valid_rides = [x for x in ride_list if x.current_wait() is not None]\n valid_rides = [x for x in valid_rides if x.current_status() == \"Operating\"]\n\n sorted_waits = sorted(valid_rides, key = lambda x: x.current_wait(), reverse = True)\n\n top_waits = sorted_waits[:5]\n lowest_waits = sorted_waits[-5:]\n weather = park.current_weather()\n weather_url = \"http://openweathermap.org/img/w/\"+ str(weather.iconname) +\".png\"\n\n context = {'park':park,\n 'rides_with_waits':ride_list,\n 'top_waits':top_waits,\n 'lowest_waits':lowest_waits[::-1],\n 'current_weather':weather,\n 'weather_url':weather_url}\n return render(request, 'park_detail.html', context)\n\ndef ride_detail(request, ride_id):\n user = None\n doesnt_follow = True\n if request.user.is_authenticated:\n user = request.user\n if len(UserRideFavorite.objects.filter(userid = int(request.user.id), rideid = int(ride_id))) > 0:\n doesnt_follow = False\n\n ride = get_object_or_404(Ride, pk = ride_id)\n\n wait_times = ride.todaywaits_set.all()\n\n categories = list()\n observed_waits_data = list()\n\n categories_predicted = list()\n predicted_wait_times_list = list()\n\n predicted_wait_times = ride.predicted_waits()\n\n predicted_series_data = 
[]\n observed_series_data = []\n confidence_interval_series = []\n\n today = datetime.today()\n ep = datetime(1970,1,1,0,0,0)\n for obs in predicted_wait_times:\n hours = int(str(obs.time)[0:2])\n minutes = int(str(obs.time)[3:5])\n\n today = today.replace(hour = hours, minute = minutes, second = 0, microsecond = 0)\n time = (today - ep).total_seconds()*1000\n\n current_tup = [time, obs.confidencelow, obs.confidencehigh]\n confidence_interval_series.append(current_tup)\n\n\n\n for obs in predicted_wait_times:\n hours = int(str(obs.time)[0:2])\n minutes = int(str(obs.time)[3:5])\n\n today = today.replace(hour = hours, minute = minutes, second = 0, microsecond = 0)\n time = (today - ep).total_seconds()*1000\n\n current_tup = [time, obs.predictedwait]\n predicted_series_data.append(current_tup)\n\n categories_predicted.append(fix_time(obs.time))\n predicted_wait_times_list.append(obs.predictedwait)\n\n for obs in wait_times:\n hours = int(str(obs.time)[0:2])\n minutes = int(str(obs.time)[3:5])\n\n today = today.replace(hour = hours, minute = minutes, second = 0, microsecond = 0)\n time = (today - ep).total_seconds()*1000\n current_tup = [time, obs.wait]\n categories.append(fix_time(obs.time))\n observed_waits_data.append(obs.wait)\n current_dict = {'name':fix_time(obs.time),\n 'y': obs.wait}\n\n observed_series_data.append(current_tup)\n\n\n\n\n observed_waits = {\n 'name': \"Observed Wait Time\",\n 'data': observed_series_data,\n 'marker':{'enabled': False},\n 'zIndex':2,\n 'type':'spline'\n }\n\n predicted_waits = {\n 'name': \"Anticipated Wait Time\",\n 'data': predicted_series_data,\n 'marker':{'enabled':False,\n 'lineColor': 'Highcharts.getOptions().colors[0]'},\n 'zIndex':1,\n 'type':'spline'\n }\n\n confidence_window = {\n 'name': 'Anticipated Range',\n 'type':'arearange',\n 'data': confidence_interval_series,\n 'lineWidth': 0,\n 'color': 'rgba(84,190,231,1)',\n 'linkedTo': ':previous',\n 'fillOpacity': 0.15,\n 'zIndex': 0,\n 'marker': {\n 'enabled': False\n }\n }\n\n\n tickInterval = 2\n\n if len(categories) > 10:\n tickInterval = 5\n\n if len(categories) > 25:\n tickInterval = 8\n\n if len(categories) > 35:\n tickInterval = 10\n\n\n\n series = [observed_waits]\n chart_title = \"Wait Times for \" + str(ride.name)\n chart = {\n 'title': {'text': chart_title},\n 'xAxis': {'type': 'datetime',\n 'dateTimeLabelFormats': {\n 'day': '%e of %b',\n 'minute': '%I:%M',\n 'hour': '%I:%M'\n }},\n 'yAxis':{'title':{\n 'text': \"Wait(Minutes)\"\n }},\n 'tooltip':{\n 'xDateFormat': '%I:%M',\n 'crosshairs':True,\n 'shared':True\n },\n 'series': [ predicted_waits, observed_waits,confidence_window]\n }\n\n dump = json.dumps(chart)\n\n\n context = {\n 'ride':ride,\n 'chart':dump,\n 'current_user':user,\n 'doesnt_follow': doesnt_follow\n }\n return render(request, 'ride_detail.html', context)\n\n\ndef no_wait_rides(request, park_id):\n park = get_object_or_404(Park, pk = park_id)\n attraction_list = park.ride_set.filter(haswaits = 0)\n context = {'park':park,\n 'other_attractions':attraction_list}\n\n return render(request, 'other_attractions.html', context)\n\n\ndef all_wait_times(request, park_id):\n park = get_object_or_404(Park, pk = park_id)\n attraction_list = park.ride_set.filter(haswaits = 1)\n context = {'park':park,\n 'rides_with_waits':attraction_list}\n\n return render(request, 'park_wait_times.html', context)\n\n\n@login_required\ndef user_home_page(request):\n username = request.user.username\n full_name = str(request.user.first_name) +\" \"+ str(request.user.last_name)\n userid = 
request.user.id\n ride_objects = None\n followed_rides = UserRideFavorite.objects.filter(userid = int(request.user.id))\n phone_numbers = UserPhonenumber.objects.filter(userid = int(request.user.id))\n numbers = None\n if len(phone_numbers) > 0:\n numbers = phone_numbers\n if len(followed_rides) > 0:\n ride_ids = [x.rideid for x in followed_rides]\n ride_objects = Ride.objects.filter(id__in = ride_ids)\n\n context = {'current_user_name':username,\n 'current_user_id':userid,\n 'rides': ride_objects,\n 'full_name': full_name,\n 'phone_number': numbers}\n return render(request, 'user_home_page.html', context)\n\n\n\n\n@login_required\ndef update_favorite(request, ride_id):\n doesnt_follow = True\n\n if request.user.is_authenticated:\n user = request.user\n if len(UserRideFavorite.objects.filter(userid = int(request.user.id), rideid = int(ride_id))) > 0:\n doesnt_follow = False\n\n if doesnt_follow:\n current_user_id = request.user.id\n ride_id = int(ride_id)\n\n favorite = UserRideFavorite()\n favorite.userid = current_user_id\n favorite.rideid = ride_id\n\n favorite.save()\n\n else:\n current_user_id = request.user.id\n ride_id = int(ride_id)\n UserRideFavorite.objects.filter(userid = int(request.user.id), rideid = int(ride_id)).delete()\n\n return HttpResponseRedirect(reverse('ride_detail', args = (ride_id,)))\n\n\n@login_required\ndef add_notifications(request, ride_id):\n if request.method == \"POST\":\n form = NotificationForm(request.POST)\n if form.is_valid():\n new_notification = UserNotifications()\n new_notification.userid = request.user.id\n new_notification.rideid = ride_id\n user_number = UserPhonenumber.objects.filter(userid = request.user.id)\n if len(user_number) > 0:\n this_user = user_number[0]\n new_notification.phonenumber = this_user.phonenumber\n new_notification.datestart = form.cleaned_data['datestart']\n new_notification.dateend = form.cleaned_data['dateend']\n\n new_notification.save()\n return redirect('user_home_page')\n else:\n form = NotificationForm()\n return render(request, 'notification_setup.html', {'form':form})\n\n@login_required\ndef add_phonenumber(request):\n if request.method == \"POST\":\n form = PhonenumberForm(request.POST)\n if form.is_valid():\n new_number = UserPhonenumber()\n new_number.userid = request.user.id\n new_number.phonenumber = form.cleaned_data['phonenumber']\n\n new_number.save()\n return redirect('user_home_page')\n else:\n form = PhonenumberForm()\n return render(request, 'add_number.html', {'form':form})\n\n@login_required\ndef remove_notification(request, ride_id):\n current_user_id = request.user.id\n ride_id = int(ride_id)\n UserNotifications.objects.filter(userid = int(request.user.id), rideid = int(ride_id)).delete()\n\n return redirect('user_home_page')\n\n@login_required\ndef go_to_ride(request, ride_id, lat_long):\n current_user_id = request.user.id\n\n test = True\n if test:\n lat_long = \"28.418432,-81.5802548\"\n\n latitude = lat_long.split(\",\")[0]\n longitude = lat_long.split(\",\")[1]\n\n ride = get_object_or_404(Ride, pk = ride_id)\n\n ride_lat = str(ride.latitude)\n ride_long = str(ride.longitude)\n\n origin = \"origin=\"+latitude+\",\"+longitude\n destination = \"destination=\"+ride_lat+\",\"+ride_long\n apikey = config.google_maps_api_key\n full_string = \"https://www.google.com/maps/embed/v1/directions?key=\"+str(apikey) +\"&\"+ origin +\"&\" + destination + \"&mode=walking\"\n # answer = requests.get(full_string)\n # json_object = answer.json()\n # polyline = 
json_object['routes'][0]['overview_polyline']['points']\n # ride_id = int(ride_id)\n context = {\n 'ride_id':ride_id,\n 'lat_long':lat_long,\n 'return':full_string\n }\n return render(request, 'go_to_ride.html',context)\n", "sub_path": "disneyWaitsApp/waittimes/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 10529, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Park.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Park.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Park", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Park", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.UserRideFavorite", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Ride", "line_number": 57, "usage_type": "argument"}, {"api_name": "datetime.datetime.today", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "call"}, {"api_name": "generic_functions.fix_time", "line_number": 97, "usage_type": "call"}, {"api_name": "generic_functions.fix_time", "line_number": 107, "usage_type": "call"}, {"api_name": "generic_functions.fix_time", "line_number": 109, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 183, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 192, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 196, "usage_type": "call"}, {"api_name": "models.Park", "line_number": 196, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 201, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 205, "usage_type": "call"}, {"api_name": "models.Park", "line_number": 205, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 210, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects.filter", "line_number": 219, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects", "line_number": 219, "usage_type": "attribute"}, {"api_name": "models.UserRideFavorite", "line_number": 219, "usage_type": "name"}, {"api_name": "models.UserPhonenumber.objects.filter", "line_number": 220, "usage_type": "call"}, {"api_name": "models.UserPhonenumber.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "models.UserPhonenumber", "line_number": 220, "usage_type": "name"}, {"api_name": "models.Ride.objects.filter", "line_number": 226, "usage_type": "call"}, {"api_name": "models.Ride.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "models.Ride", "line_number": 226, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 233, 
"usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 213, "usage_type": "name"}, {"api_name": "models.UserRideFavorite.objects.filter", "line_number": 244, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects", "line_number": 244, "usage_type": "attribute"}, {"api_name": "models.UserRideFavorite", "line_number": 244, "usage_type": "name"}, {"api_name": "models.UserRideFavorite", "line_number": 251, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects.filter", "line_number": 260, "usage_type": "call"}, {"api_name": "models.UserRideFavorite.objects", "line_number": 260, "usage_type": "attribute"}, {"api_name": "models.UserRideFavorite", "line_number": 260, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 262, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 262, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 238, "usage_type": "name"}, {"api_name": "forms.NotificationForm", "line_number": 268, "usage_type": "call"}, {"api_name": "models.UserNotifications", "line_number": 270, "usage_type": "call"}, {"api_name": "models.UserPhonenumber.objects.filter", "line_number": 273, "usage_type": "call"}, {"api_name": "models.UserPhonenumber.objects", "line_number": 273, "usage_type": "attribute"}, {"api_name": "models.UserPhonenumber", "line_number": 273, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 281, "usage_type": "call"}, {"api_name": "forms.NotificationForm", "line_number": 283, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 284, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 265, "usage_type": "name"}, {"api_name": "forms.PhonenumberForm", "line_number": 289, "usage_type": "call"}, {"api_name": "models.UserPhonenumber", "line_number": 291, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 296, "usage_type": "call"}, {"api_name": "forms.PhonenumberForm", "line_number": 298, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 299, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 286, "usage_type": "name"}, {"api_name": "models.UserNotifications.objects.filter", "line_number": 305, "usage_type": "call"}, {"api_name": "models.UserNotifications.objects", "line_number": 305, "usage_type": "attribute"}, {"api_name": "models.UserNotifications", "line_number": 305, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 307, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 301, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 320, "usage_type": "call"}, {"api_name": "models.Ride", "line_number": 320, "usage_type": "argument"}, {"api_name": "config.google_maps_api_key", "line_number": 327, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 338, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 309, "usage_type": "name"}]} +{"seq_id": "252266004", "text": "# coding=utf-8\nfrom flask import Blueprint\nfrom actions.foo.bar import BarTest\n\n# init blueprint\nfoo = Blueprint('blueprints', __name__)\n\n\n# routes\n@foo.route(\"/bar/\")\ndef bar_test(test):\n controller = BarTest({\"test\": test})\n 
return controller.run()\n", "sub_path": "blueprints/foo.py", "file_name": "foo.py", "file_ext": "py", "file_size_in_byte": 263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call"}, {"api_name": "actions.foo.bar.BarTest", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "539532193", "text": "__version__= '0.0.3'\r\n\"\"\"\r\nCreated on Tue Aug 21 21:54:53 2018\r\n\r\n@author: V\r\n\r\nNotes:\r\n .0:Started app\r\n .1:Functional\r\n .2:bigger buttons for touch\r\n .3:better sizes, more comments,\r\nTODO:\r\n Able to count both how much to remove to start day w/ given input amt & how m\r\n Make able to translate text to spanish\r\n Make sections clear\r\n Comment more of code for peace of mind\r\n HELP MENU\r\n \r\n\"\"\"\r\n#importing tools from other libraries, pretty standard - the only part of the script which imports chunks of code from other files\r\nimport pandas as pd #efficient number analysis\r\nimport sys #needed to get window to continue running\r\n\r\nfrom PyQt5.QtWidgets import * #GUI (Graphical User Interface) library\r\nfrom PyQt5 import QtGui, QtCore#Specific parts of PyQt5 for ease of coding\r\n###no more imports below this line###\r\n\r\nclass cashbox(object):#definition of cashbox & what it contains - amount to start, amount epos says, & amount counted by employee\r\n def __init__(self,amount=0):\r\n self.sum=amount\r\n self.empamt=dict([[i,0]for i in cashunits.keys()])#makes a new editable list with cashunits as keys\r\n self.setstartingsum()\r\n self.setepossum()\r\n def setstartingsum(self,amount=1070.5):#don't quite remember exact $\r\n self.startingsum=amount\r\n def setepossum(self,amount=None):\r\n if amount is not None:#if it's called by a user w/ an actual value instead of just to start the program\r\n self.epossum=amount\r\n def recalc(self):\r\n self.sum=sum(self.empamt.values())-self.startingsum#this is what has been put in throughout the day\r\n def check(self):#is sum greater or equal to what epos says? 
if so return True \r\n if self.sum>=self.eposamt:\r\n return True\r\n else:\r\n return False\r\n \r\n \r\n \r\n #Base units/amounts cash comes in, 'def'=if unit is default, 'amt'=value amount,'typ': bill or coin - rn only decides which col it's in...\r\n #rn security does nothing but it's possible to do smth to notify employee to verify authenticity\r\n \r\ncashunits={'Penny':{'def':True,'amt':0.01,'typ':'Coin','img':''},\r\n 'Nickel':{'def':True,'amt':0.05,'typ':'Coin','img':''},\r\n 'Dime':{'def':True,'amt':0.10,'typ':'Coin','img':''},\r\n 'Quarter':{'def':True,'amt':0.25,'typ':'Coin','img':''},\r\n 'Half Dollar':{'def':False,'amt':0.50,'typ':'Coin','img':''},\r\n '1 Dollar':{'def':True,'amt':1,'typ':'Bill','img':''},\r\n '2 Dollar':{'def':False,'amt':2,'typ':'Bill','img':''},\r\n '5 Dollar':{'def':True,'amt':5,'typ':'Bill','img':''},\r\n '10 Dollar':{'def':True,'amt':10,'typ':'Bill','img':''},\r\n '20 Dollar':{'def':True,'amt':20,'typ':'Bill','img':''},\r\n '50 Dollar':{'def':True,'amt':50,'typ':'Bill','img':'','security':True},\r\n '100 Dollar':{'def':True,'amt':100,'typ':'Bill','img':'','security':True}}\r\n\r\npcash=pd.DataFrame(cashunits)\r\n##############\r\nclass CashWindow(QMainWindow):#main window of actual interface person interacts with - everything above is backend\r\n def __init__(self): #init header\r\n super(CashWindow,self).__init__()#makes it so default QMainWindow stuff gets activated\r\n self.cashbox=cashbox()\r\n self.sum=0\r\n self.wind=QWidget()#Empty widget which serves as basis for layout, dumb but only way i've found to get menu bar not to mess up\r\n self.setCentralWidget(self.wind)\r\n self.Window()\r\n self.show() #shows window\r\n \r\n def Window(self):#all of what's shown is defined here\r\n layout = QGridLayout() #Can lay things out in a defined x/y coordinate space\r\n self.wind.setLayout(layout)\r\n \r\n menu=QMenuBar()#if there needs to be file/edit stuff in the future\r\n self.setMenuBar(menu)\r\n \r\n #each individual cash unit\r\n self.units=dict()\r\n bj,cj=0,0\r\n for i in cashunits.keys():\r\n \r\n if cashunits[i]['def']:#show only if default - will b editable\r\n \r\n if cashunits[i]['typ']=='Bill':\r\n j=1\r\n jj=bj#janky but works\r\n bj=bj+1\r\n else:\r\n j=2\r\n jj=cj\r\n cj=cj+1\r\n self.units[i]=unit(i)\r\n layout.addWidget(self.units[i],jj,j)\r\n self.units[i].numchanged.connect(lambda ii,i=i:self.recalcsum(i,ii))\r\n jj=jj+1\r\n #total\r\n self.values=dict([[i,0] for i in self.units.keys()])\r\n self.totaltext='TOTAL $:'\r\n self.total=QDoubleSpinBox(prefix=self.totaltext,readOnly=True,maximum=100000)\r\n layout.addWidget(self.total,4,2,2,2)\r\n self.total.setMinimumHeight(100)\r\n self.total.setObjectName('total')\r\n self.total.setButtonSymbols(2)#no updown buttons\r\n \r\n etext=QLabel(text='ePos says cash total should be')\r\n self.epos=QDoubleSpinBox(prefix='epos $:',maximum=100000)\r\n self.epos.setSingleStep(0.01)\r\n layout.addWidget(self.epos,2,0)\r\n layout.addWidget(etext,1,0)\r\n \r\n stext=QLabel(text='cash to start each day is:')\r\n samt=107.5\r\n self.strt=QDoubleSpinBox(prefix='start $:',maximum=100000)\r\n self.strt.setReadOnly(True)\r\n self.strt.setButtonSymbols(2)\r\n self.strt.setValue(samt)\r\n \r\n \r\n layout.addWidget(self.strt,4,0)\r\n layout.addWidget(stext,3,0)\r\n \r\n def recalcsum(self,unit,num=None):\r\n# print(unit,num)\r\n if num is None:\r\n self.values[unit]=float(self.units[unit].num)\r\n else:\r\n self.values[unit]=float(num)\r\n# print(self.values)\r\n tsum=sum([self.values[i]*pcash[i]['amt'] for i 
in self.values.keys()])\r\n self.total.setValue(tsum)\r\n# self.total.setMinimumHeight(300)\r\n self.checksum()\r\n def checksum(self):\r\n if self.total.value()>0 and self.epos.value()>0:\r\n if (self.total.value()-self.strt.value())>=self.epos.value():\r\n sheet=\"background-color: green;color:white; \"\r\n \r\n else:\r\n sheet=\"background-color: red; color:black\"\r\n else:\r\n sheet=\"background-color: white;color:black;\"\r\n \r\n self.total.setStyleSheet(sheet)\r\n \r\nclass unit(QGroupBox):#base class which will be duplicated for each cash amount widget\r\n \r\n numchanged=QtCore.pyqtSignal(int)\r\n def __init__(self,amount):\r\n super(unit,self).__init__()\r\n self.box=QGroupBox()\r\n self.bbox=QGroupBox()\r\n self.bbox.setLayout(QVBoxLayout())\r\n self.setLayout(QHBoxLayout())\r\n w=80\r\n self.setAlignment(QtCore.Qt.AlignCenter)\r\n self.layout().addWidget(self.bbox)\r\n unitheight=30\r\n self.arrs=[QPushButton(text='+',maximumWidth=w,minimumHeight=unitheight),QPushButton(text='-',maximumWidth=w,minimumHeight=unitheight)]\r\n self.bbox.layout().addWidget(self.arrs[0],0,QtCore.Qt.AlignCenter)\r\n self.arrs[0].clicked.connect(lambda:self.numinp.setValue(self.numinp.value()+1))\r\n self.arrs[1].clicked.connect(lambda:self.numinp.setValue(self.numinp.value()-1))\r\n self.layout().addWidget(self.box,1)\r\n self.bbox.layout().addWidget(self.arrs[1],1,QtCore.Qt.AlignCenter)\r\n self.setTitle(amount)\r\n self.str=amount#string amount\r\n self.val=cashunits[amount]['amt']#numerical amount this unit is worth\r\n self.num=0#num is better # to keep as base than $ amt\r\n self.box.setLayout(QHBoxLayout())\r\n self.cashinp=QDoubleSpinBox(prefix='$',maximum=10000,minimumHeight=50)\r\n# self.cashinp.focusChanged.connect(self.cashinp.selectAll)\r\n self.cashinp.setSingleStep(self.val)\r\n self.arrs[0]\r\n \r\n self.cashinp.setButtonSymbols(0)#sets symbols to +/-\r\n# self.cashinp.setValidator(QtGui.QDoubleValidator(bottom=0))\r\n self.cashinp.valueChanged.connect(lambda:self.updateunit('cash'))\r\n self.box.layout().addWidget(self.cashinp)\r\n self.numinp=QSpinBox(prefix='num: ',maximum=1000,minimumHeight=50)\r\n self.numinp.setSingleStep(1)\r\n# self.numinp.setValidator(QtGui.or(bottom=0))\r\n self.numinp.valueChanged.connect(lambda:self.updateunit('num'))\r\n self.box.layout().addWidget(self.numinp)\r\n self.img=''\r\n# print('test')\r\n \r\n \r\n @QtCore.pyqtSlot()#makes it editable on input outside \r\n def updateunit(self,fromslot):#Ties inputs together\r\n if fromslot=='cash' or fromslot=='both':#make sure divisible right\r\n cash=float(self.cashinp.value())\r\n \r\n if cash!='':\r\n self.num=int(cash/self.val)\r\n self.numinp.setValue(self.num)\r\n elif fromslot=='num' or fromslot=='both':\r\n num=self.numinp.value()\r\n if num!='' :#takes off floating spacebars\r\n # if valid:\r\n self.num=num\r\n self.cashinp.setValue(round(float(num)*float(self.val),3))\r\n else:\r\n self.cashinp.clear()\r\n \r\n self.numchanged.emit(self.num)\r\n \r\n \r\ndef cashrun():\r\n app=0 \r\n #windthread=QtCore.QThread()\r\n \r\n app=QApplication(sys.argv)\r\n wind=CashWindow()\r\n app.exec_()\r\n \r\n", "sub_path": "OHCashcount.py", "file_name": "OHCashcount.py", "file_ext": "py", "file_size_in_byte": 9462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 154, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", 
"line_number": 154, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 162, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 162, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 166, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 170, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 170, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 194, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 194, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 218, "usage_type": "attribute"}]} +{"seq_id": "496743872", "text": "\"\"\"ACSTOOLS regression test helpers.\"\"\"\n\nimport os\n\nimport pytest\nfrom ci_watson.artifactory_helpers import get_bigdata\nfrom ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds\n\nfrom astropy.io import fits\nfrom astropy.io.fits import FITSDiff\n\n__all__ = ['calref_from_image', 'BaseACSTOOLS']\n\n\ndef calref_from_image(input_image):\n \"\"\"\n Return a list of reference filenames, as defined in the primary\n header of the given input image, necessary for calibration.\n This is mostly needed for destriping tools.\n \"\"\"\n\n # NOTE: Add additional mapping as needed.\n # Map *CORR to associated CRDS reference file.\n corr_lookup = {\n 'DQICORR': ['BPIXTAB', 'SNKCFILE'],\n 'ATODCORR': ['ATODTAB'],\n 'BLEVCORR': ['OSCNTAB'],\n 'SINKCORR': ['SNKCFILE'],\n 'BIASCORR': ['BIASFILE'],\n 'PCTECORR': ['PCTETAB', 'DRKCFILE', 'BIACFILE'],\n 'FLSHCORR': ['FLSHFILE'],\n 'CRCORR': ['CRREJTAB'],\n 'SHADCORR': ['SHADFILE'],\n 'DARKCORR': ['DARKFILE', 'TDCTAB'],\n 'FLATCORR': ['PFLTFILE', 'DFLTFILE', 'LFLTFILE'],\n 'PHOTCORR': ['IMPHTTAB'],\n 'LFLGCORR': ['MLINTAB'],\n 'GLINCORR': ['MLINTAB'],\n 'NLINCORR': ['NLINFILE'],\n 'ZSIGCORR': ['DARKFILE', 'NLINFILE'],\n 'WAVECORR': ['LAMPTAB', 'WCPTAB', 'SDCTAB'],\n 'SGEOCORR': ['SDSTFILE'],\n 'X1DCORR': ['XTRACTAB', 'SDCTAB'],\n 'SC2DCORR': ['CDSTAB', 'ECHSCTAB', 'EXSTAB', 'RIPTAB', 'HALOTAB',\n 'TELTAB', 'SRWTAB'],\n 'BACKCORR': ['XTRACTAB'],\n 'FLUXCORR': ['APERTAB', 'PHOTTAB', 'PCTAB', 'TDSTAB']}\n\n hdr = fits.getheader(input_image, ext=0)\n\n # Mandatory CRDS reference file.\n # Destriping tries to ingest some *FILE regardless of *CORR.\n ref_files = ref_from_image(input_image, ['CCDTAB', 'DARKFILE', 'PFLTFILE'])\n\n for step in corr_lookup:\n # Not all images have the CORR step and it is not always on.\n # Destriping also does reverse-calib.\n if ((step not in hdr) or\n (hdr[step].strip().upper() not in ('PERFORM', 'COMPLETE'))):\n continue\n\n ref_files += ref_from_image(input_image, corr_lookup[step])\n\n return list(set(ref_files)) # Remove duplicates\n\n\n# Base class for actual tests.\n# NOTE: Named in a way so pytest will not pick them up here.\n# NOTE: bigdata marker requires TEST_BIGDATA environment variable to\n# point to a valid big data directory, whether locally or on Artifactory.\n# NOTE: envopt would point tests to \"dev\" or \"stable\".\n# NOTE: _jail fixture ensures each test runs in a clean tmpdir.\n@pytest.mark.bigdata\n@pytest.mark.usefixtures('_jail', 'envopt')\nclass BaseACSTOOLS:\n # Timeout in seconds for file downloads.\n timeout = 30\n\n instrument = 'acs'\n ignore_keywords = ['filename', 'date', 'iraf-tlm', 'fitsdate',\n 'opus_ver', 'cal_ver', 'proctime', 'history']\n\n # To be defined by test class in actual test modules.\n detector = ''\n\n 
@pytest.fixture(autouse=True)\n def setup_class(self, envopt):\n \"\"\"\n Class-level setup that is done at the beginning of the test.\n\n Parameters\n ----------\n envopt : {'dev', 'stable'}\n This is a ``pytest`` fixture that defines the test\n environment in which input and truth files reside.\n\n \"\"\"\n self.env = envopt\n\n def get_input_file(self, filename):\n \"\"\"\n Copy input file (ASN, RAW, etc) into the working directory.\n If ASN is given, RAW files in the ASN table is also copied.\n The associated CRDS reference files are also copied or\n downloaded, if necessary.\n\n Data directory layout for CALCOS::\n\n detector/\n input/\n truth/\n\n Parameters\n ----------\n filename : str\n Filename of the ASN/RAW/etc to copy over, along with its\n associated files.\n\n \"\"\"\n # Copy over main input file.\n dest = get_bigdata('scsb-acstools', self.env, self.detector, 'input',\n filename)\n\n # For historical reason, need to remove \".orig\" suffix if it exists.\n if filename.endswith('.orig'):\n newfilename = filename.rstrip('.orig')\n os.rename(filename, newfilename)\n filename = newfilename\n\n if filename.endswith('_asn.fits'):\n all_raws = raw_from_asn(filename)\n for raw in all_raws: # Download RAWs in ASN.\n get_bigdata('scsb-acstools', self.env, self.detector, 'input',\n raw)\n else:\n all_raws = [filename]\n\n first_pass = ('JENKINS_URL' in os.environ and\n 'ssbjenkins' in os.environ['JENKINS_URL'])\n\n for raw in all_raws:\n ref_files = calref_from_image(raw)\n\n for ref_file in ref_files:\n # Special reference files that live with inputs.\n if ('$' not in ref_file and\n os.path.basename(ref_file) == ref_file):\n get_bigdata('scsb-acstools', self.env, self.detector,\n 'input', ref_file)\n continue\n\n # Jenkins cannot see Central Storage on push event,\n # and somehow setting, say, jref to \".\" does not work anymore.\n # So, we need this hack.\n if '$' in ref_file and first_pass:\n first_pass = False\n if not os.path.isdir('/grp/hst/cdbs'):\n ref_path = os.path.dirname(dest) + os.sep\n var = ref_file.split('$')[0]\n os.environ[var] = ref_path # hacky hack hack\n\n # Download reference files, if needed only.\n download_crds(ref_file, timeout=self.timeout)\n\n def compare_outputs(self, outputs, atol=0, rtol=1e-7, raise_error=True,\n ignore_keywords_overwrite=None):\n \"\"\"\n Compare ACSTOOLS output with \"truth\" using ``fitsdiff``.\n\n Parameters\n ----------\n outputs : list of tuple\n A list of tuples, each containing filename (without path)\n of CALXXX output and truth, in that order. 
Example::\n\n [('output1.fits', 'truth1.fits'),\n ('output2.fits', 'truth2.fits'),\n ...]\n\n atol, rtol : float\n Absolute and relative tolerance for data comparison.\n\n raise_error : bool\n Raise ``AssertionError`` if difference is found.\n\n ignore_keywords_overwrite : list of str or `None`\n If not `None`, these will overwrite\n ``self.ignore_keywords`` for the calling test.\n\n Returns\n -------\n report : str\n Report from ``fitsdiff``.\n This is part of error message if ``raise_error=True``.\n\n \"\"\"\n all_okay = True\n creature_report = ''\n\n if ignore_keywords_overwrite is None:\n ignore_keywords = self.ignore_keywords\n else:\n ignore_keywords = ignore_keywords_overwrite\n\n for actual, desired in outputs:\n desired = get_bigdata('scsb-acstools', self.env, self.detector,\n 'truth', desired)\n fdiff = FITSDiff(actual, desired, rtol=rtol, atol=atol,\n ignore_keywords=ignore_keywords)\n creature_report += fdiff.report()\n\n if not fdiff.identical and all_okay:\n all_okay = False\n\n if not all_okay and raise_error:\n raise AssertionError(os.linesep + creature_report)\n\n return creature_report\n", "sub_path": "acstools/tests/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 7731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "astropy.io.fits.getheader", "line_number": 49, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 49, "usage_type": "name"}, {"api_name": "ci_watson.hst_helpers.ref_from_image", "line_number": 53, "usage_type": "call"}, {"api_name": "ci_watson.hst_helpers.ref_from_image", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 86, "usage_type": "call"}, {"api_name": "ci_watson.artifactory_helpers.get_bigdata", "line_number": 121, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 127, "usage_type": "call"}, {"api_name": "ci_watson.hst_helpers.raw_from_asn", "line_number": 131, "usage_type": "call"}, {"api_name": "ci_watson.artifactory_helpers.get_bigdata", "line_number": 133, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "ci_watson.artifactory_helpers.get_bigdata", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 160, "usage_type": "attribute"}, {"api_name": "ci_watson.hst_helpers.download_crds", "line_number": 163, "usage_type": "call"}, {"api_name": "ci_watson.artifactory_helpers.get_bigdata", "line_number": 206, "usage_type": "call"}, {"api_name": "astropy.io.fits.FITSDiff", "line_number": 208, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pytest.mark.usefixtures", "line_number": 74, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 74, "usage_type": 
"attribute"}]} +{"seq_id": "214790584", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 28 13:51:31 2016\r\n\r\n@author: lemitri\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\nimport scipy.stats as sp\r\n#import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sb\r\nsb.set_style('ticks')\r\n\r\nimport gurobipy as gb\r\nimport itertools as it\r\n\r\nimport numpy as np\r\n\r\n#%% NORDPOOL MC clearing for each scenario IN SAMPLE !!!!\r\n\r\nclass expando(object):\r\n '''\r\n A small class which can have attributes set\r\n '''\r\n pass\r\n\r\nclass sequential_elec_scenario:\r\n def __init__(self,s0,W_max):\r\n self.data = expando()\r\n self.variables = expando()\r\n self.constraints = expando()\r\n self._load_data(s0,W_max)\r\n self._build_model()\r\n \r\n def optimize(self):\r\n self.model.optimize()\r\n \r\n def _load_data(self,s0,W_max):\r\n \r\n \r\n #indexes\r\n self.data.W_max = W_max\r\n self.data.time = time\r\n self.data.time_list=time_list\r\n# self.data.node=node\r\n# self.data.line=line\r\n# self.data.pipe=pipe\r\n self.data.gen=gen\r\n #self.data.heat_storage=heat_storage\r\n #self.data.elec_storage=elec_storage\r\n self.data.heat_pump =heat_pump\r\n self.data.wind=wind\r\n #self.data.heat_only = heat_only\r\n #self.data.CHP_sorted = CHP_sorted\r\n self.data.CHP = CHP\r\n self.data.rho_elec = rho_elec\r\n \r\n # scenarios\r\n self.data.elec_load = {t:elec_load_scenario_kmeans[scenario_dic_kmeans[s0][0],t] for t in time} #SCENARIOS REALIZATIONS!!!!!!!!!!!!!!!!! \r\n self.data.wind_scenario = {(w,t):wind_scenario_kmeans[scenario_dic_kmeans[s0][1],w,t] for w in wind for t in time} #SCENARIOS REALIZATIONS!!!!!!!!!!!!!!!!!\r\n self.data.alpha = {(g,t):alpha_scenario_kmeans[scenario_dic_kmeans[s0][2],g,t] for g in CHP+gen+wind for t in time} #SCENARIOS REALIZATIONS!!!!!!!!!!!!!!!!!\r\n \r\n # Elec station parameters\r\n self.data.elec_maxprod = {(g,t):elec_maxprod_sequential[W_max,g,t] for g in gen+wind+CHP+heat_pump for t in time}\r\n self.data.elec_minprod = {(g,t):elec_minprod_sequential[W_max,g,t] for g in CHP for t in time}\r\n\r\n self.data.alpha_HP = {(g,t):100 for g in heat_pump for t in time}\r\n\r\n def _build_model(self):\r\n \r\n self.model = gb.Model()\r\n self._build_variables()\r\n self._build_objective()\r\n self._build_constraints()\r\n \r\n def _build_variables(self):\r\n \r\n #indexes\r\n m = self.model\r\n time = self.data.time\r\n time_list = self.data.time_list\r\n# self.data.node=node\r\n# self.data.line=line\r\n# self.data.pipe=pipe\r\n gen = self.data.gen\r\n #self.data.heat_storage=heat_storage\r\n #self.data.elec_storage=elec_storage\r\n heat_pump = self.data.heat_pump \r\n wind = self.data.wind\r\n #heat_only = self.data.heat_only \r\n #CHP_sorted = self.data.CHP_sorted\r\n CHP = self.data.CHP \r\n #self.data.producers=producers\r\n #self.data.heat_station=heat_station\r\n #self.data.elec_station=elec_station\r\n #self.data.heat_exchanger_station = heat_exchanger_station\r\n \r\n\r\n #electricity market optimization variables : primal variables\r\n \r\n self.variables.P = {} # electricity production from electricity generators\r\n for t in time:\r\n for g in CHP+gen+wind:\r\n self.variables.P[g,t] = m.addVar(lb=0,ub=self.data.elec_maxprod[g,t],name='elec prod at marginal cost({0},{1})'.format(g,t)) # dispatch of electricity generators\r\n\r\n\r\n self.variables.HP_load = {} # electricity production from electricity generators\r\n for t in time:\r\n for g in heat_pump:\r\n self.variables.HP_load[g,t] = 
m.addVar(lb=0,ub=self.data.elec_maxprod[g,t],name='elec prod at marginal cost({0},{1})'.format(g,t))\r\n \r\n \r\n self.variables.P_min = {} # electricity production from electricity generators\r\n for t in time:\r\n for g in CHP:\r\n self.variables.P_min[g,t] = m.addVar(lb=0,ub=self.data.elec_minprod[g,t],name='elec MIN prod for CHPs({0},{1})'.format(g,t)) # dispatch of electricity generators\r\n \r\n m.update()\r\n \r\n \r\n def _build_objective(self): # building the objective function for the heat maret clearing\r\n \r\n #indexes\r\n m = self.model\r\n time = self.data.time\r\n time_list = self.data.time_list\r\n# self.data.node=node\r\n# self.data.line=line\r\n# self.data.pipe=pipe\r\n gen = self.data.gen\r\n #self.data.heat_storage=heat_storage\r\n #self.data.elec_storage=elec_storage\r\n #heat_pump = self.data.heat_pump \r\n wind = self.data.wind\r\n #heat_only = self.data.heat_only \r\n #CHP_sorted = self.data.CHP_sorted\r\n CHP = self.data.CHP \r\n #self.data.producers=producers\r\n #self.data.heat_station=heat_station\r\n #self.data.elec_station=elec_station\r\n #self.data.heat_exchanger_station = heat_exchanger_station\r\n \r\n m.setObjective(-gb.quicksum(self.data.alpha_HP[g,t]*self.variables.HP_load[g,t] for t in time for g in heat_pump)+gb.quicksum(self.data.alpha[g,t]*self.variables.P[g,t] for t in time for g in gen+wind)+gb.quicksum(self.data.alpha[g,t]*self.data.rho_elec[g]*self.variables.P[g,t]-300*self.variables.P_min[g,t] for t in time for g in CHP),\r\n gb.GRB.MINIMIZE)\r\n \r\n \r\n def _build_constraints(self):\r\n \r\n #indexes\r\n m = self.model\r\n time = self.data.time\r\n time_list = self.data.time_list\r\n gen = self.data.gen\r\n heat_pump = self.data.heat_pump \r\n wind = self.data.wind\r\n CHP = self.data.CHP \r\n \r\n # wind realization\r\n\r\n self.constraints.wind_scenario = {}\r\n \r\n for t in time:\r\n for g in wind: \r\n \r\n self.constraints.wind_scenario[g,t] = m.addConstr(\r\n self.variables.P[g,t],\r\n gb.GRB.LESS_EQUAL,\r\n self.data.wind_scenario[g,t]*self.data.elec_maxprod[g,t],name='wind scenario({0},{1})'.format(g,t))\r\n \r\n # elec balance\r\n\r\n# self.constraints.elec_balance = {}\r\n# \r\n# for t in time:\r\n# for n in node: \r\n# \r\n# self.constraints.elec_balance[n,t] = m.addConstr(\r\n# gb.quicksum(self.variables.storage_plus[g,t] - self.variables.storage_moins[g,t] for g in self.data.elec_storage_node[n])+gb.quicksum(self.variables.P[g,t] for g in self.data.elec_station_node[n])+gb.quicksum(self.variables.flow_line[l,t] for l in self.data.line_end[n])-gb.quicksum(self.variables.flow_line[l,t] for l in self.data.line_start[n])-self.data.elec_load[n,t],\r\n# gb.GRB.EQUAL,\r\n# 0,name='elec balance({0},{1})'.format(n,t)) \r\n\r\n self.constraints.elec_balance = {}\r\n \r\n for t in time:\r\n \r\n self.constraints.elec_balance[t] = m.addConstr(\r\n gb.quicksum(self.variables.P_min[g,t] for g in CHP)+gb.quicksum(self.variables.P[g,t] for g in CHP+gen+wind),\r\n gb.GRB.EQUAL,\r\n self.data.elec_load[t]+gb.quicksum(self.variables.HP_load[g,t] for g in heat_pump),name='elec balance({0})'.format(t)) \r\n\r\n\r\n#%% solve the MC for different realisations of the scenarios \r\n\r\n#elec_cost_sequential_scenario = {}\r\n#spot_price_sequential_scenario = {}\r\n#\r\n#P_sequential_scenario = {}\r\n#\r\n#wind_curtailment_sequential_scenario = {}\r\n#\r\n#elec_cost_sequential_average = {}\r\n#spot_price_sequential_average = {}\r\n#P_sequential_average = {}\r\n#wind_curtailment_sequential_average = {}\r\n\r\nfor W_max in W_range: \r\n \r\n for s0 in 
scenario_kmeans:\r\n \r\n dispatch = sequential_elec_scenario(s0,W_max)\r\n dispatch.model.params.OutputFlag = 0\r\n dispatch.optimize()\r\n\r\n \r\n for g in gen+wind:\r\n for t in time:\r\n \r\n P_sequential_scenario[W_max,s0,g,t]=dispatch.variables.P[g,t].x\r\n \r\n for g in CHP:\r\n for t in time:\r\n \r\n P_sequential_scenario[W_max,s0,g,t]=dispatch.variables.P_min[g,t].x + dispatch.variables.P[g,t].x\r\n \r\n for g in heat_pump:\r\n for t in time:\r\n P_sequential_scenario[W_max,s0,g,t]=dispatch.variables.HP_load[g,t].x\r\n \r\n \r\n for t in time:\r\n\r\n spot_price_sequential_scenario[W_max,s0,t] = dispatch.constraints.elec_balance[t].Pi\r\n\r\n wind_curtailment_sequential_scenario[W_max,s0] = sum(W_max*wind_scenario_kmeans[scenario_dic_kmeans[s0][1],w,t]- P_sequential_scenario[W_max,s0,w,t] for w in wind for t in time)\r\n\r\n#%%\r\n\r\nW_range=[50,75,100,125,150,175,200,225,250,275,300]\r\n\r\nfor W_max in W_range: \r\n \r\n for s0 in scenario_kmeans:\r\n \r\n\r\n elec_cost_sequential_scenario[W_max,s0] = sum(P_sequential_scenario[W_max,s0,g,t]*alpha_scenario_kmeans[scenario_dic_kmeans[s0][2],g,t] for g in gen for t in time) + sum(P_sequential_scenario[W_max,s0,g,t]*rho_elec[g]*alpha_scenario_kmeans[scenario_dic_kmeans[s0][2],g,t] for g in CHP for t in time) \r\n # - sum(Q_sequential[W_max,g,t]/COP[g]*spot_price_sequential_scenario[W_max,s0,t] for g in heat_pump for t in time)\r\n\r\n# for t in time:\r\n# for h in CHP+gen+wind+heat_pump:\r\n# \r\n# P_sequential_average[W_max,g,t]=sum(P_sequential_scenario[W_max,s0,g,t] for s0 in scenario_kmeans)/S_all_kmeans\r\n# \r\n# \r\n# spot_price_sequential_average[W_max,t] = sum(spot_price_sequential_scenario[W_max,s,t] for s in scenario_kmeans)/S_all_kmeans\r\n#\r\n# wind_curtailment_sequential_average[W_max] = sum(wind_curtailment_sequential_scenario[W_max,s] for s in scenario_kmeans)/S_all_kmeans\r\n\r\n elec_cost_sequential_average[W_max] = sum(elec_cost_sequential_scenario[W_max,s] for s in scenario_kmeans)/S_all_kmeans\r\n", "sub_path": "step 2 sequential elec market.py", "file_name": "step 2 sequential elec market.py", "file_ext": "py", "file_size_in_byte": 10178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "seaborn.set_style", "line_number": 14, "usage_type": "call"}, {"api_name": "gurobipy.Model", "line_number": 73, "usage_type": "call"}, {"api_name": "gurobipy.quicksum", "line_number": 145, "usage_type": "call"}, {"api_name": "gurobipy.GRB", "line_number": 146, "usage_type": "attribute"}, {"api_name": "gurobipy.GRB", "line_number": 169, "usage_type": "attribute"}, {"api_name": "gurobipy.quicksum", "line_number": 189, "usage_type": "call"}, {"api_name": "gurobipy.GRB", "line_number": 190, "usage_type": "attribute"}, {"api_name": "gurobipy.quicksum", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "129751820", "text": "import requests\nimport sys\n\n\"\"\"\nTakes a text file containing a corpus as argument and returns the ziped tarball containing the .lm and .dic,\nusing http://www.speech.cs.cmu.edu/tools/lmtool-new.html \n\"\"\"\n\n\ncorp_file = sys.argv[1]\nf = open(corp_file,'rb')\nprint(f)\nurl = \"http://www.speech.cs.cmu.edu/cgi-bin/tools/lmtool/run\"\nfiles = {'formtype': 'simple','corpus': f}\n\nr = requests.post(url,files=files)\nfor lines in r.text.split(\"\\n\"): # find download link\n print(lines)\n if ' is the compressed version.\",\"\")\ndl_link = dl_link.split(\">\")[0].strip()\ndl_link = 
dl_link[1:-1]\nprint(\"dl_link:\",dl_link)\ndict_responce = requests.get(\n dl_link, allow_redirects=True)\nout_file_name = sys.argv[1].replace(\".txt\",\"_model.tgz\")\nopen(out_file_name, 'wb').write(\n dict_responce.content)\n", "sub_path": "auto_speech_recognition/corprus_creation/get_model.py", "file_name": "get_model.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "117248790", "text": "# Image augmentation 兩個主要的功能包含『彌補資料不足』以及『避免Overfitting』 :\n# 該如何自己創造新的資料呢?最簡單的方式就是透過Image augmentation,我們藉由旋轉、裁切、增加噪點、白化等技術,如此一來,我們就硬生生地增加了許多的資料。\n# 訓練一個分類器時,大家應該很容易遇到Overfitting的狀況,也就是對Training Data過於完美的擬合,此時,透過適當的圖像增強,也能降低Overfitting 的可能性。\n# Image Augmentation 是常見的影像前處理,然而也要避免一些錯誤的使用情境:\n# 如訓練數字模型時使用垂直翻轉,這樣會造成6、9之間的訓練問題,\n# 如輸入影像為小尺寸(ex. 32*32),結果隨機裁切16個像素,如此幾乎所有的內容都被裁切導致模型無法學到有用資訊。\n# --------------------------------------------------------------------------------------------------------------------- #\n# 水平與垂直翻轉 (Flip) :\n# xy 軸順序顛倒\n# --------------------------------------------- #\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n# --------------------------------------------- #\npicture = cv2.imread('unnamed.jpg',cv2.IMREAD_COLOR)\n# --------------------------------------------- #\n# 表示圖片channel方法 : img[:,:,1] or img[...,1]\n# ::-1代表從 end 走到 start (倒序)\n# [::-1,:,:] 代表對y軸倒序(高)\n# [:,::-1,:] 代表對x軸倒序(寬)\n# [:,:,::-1] 代表對channel倒序\nfilp_vertical = picture[::-1,:,:]\nfilp_horizontal = picture[:,::-1,:]\nfilp_horizontal_vertical = filp_horizontal[::-1,:,:]\n# --------------------------------------------- #\nimg_combine_first = np.hstack((picture,filp_horizontal ))\nimg_combine_second = np.hstack((filp_vertical,filp_horizontal_vertical))\nimg_combine_first = cv2.cvtColor(img_combine_first, cv2.COLOR_BGR2RGB)\nimg_combine_second = cv2.cvtColor(img_combine_second, cv2.COLOR_BGR2RGB)\nplt.figure()\nplt.subplot(2,1,1)\nplt.imshow(img_combine_first)\nplt.axis('off')\nplt.subplot(2,1,2)\nplt.imshow(img_combine_second)\nplt.axis('off')\nplt.show()\n# --------------------------------------------- #\nimg_combine_all = np.vstack((img_combine_first,img_combine_second))\nimg_combine_all = cv2.cvtColor(img_combine_all, cv2.COLOR_BGR2RGB)\ncv2.imshow('Combine', img_combine_all)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# --------------------------------------------- #\n# --------------------------------------------------------------------------------------------------------------------- #\n# 縮放操作 (Scale) - OpenCV\n# 因為縮小跟放大要參考周圍的 pixel 值\n# 經過統計與運算去減少 / 生成新的 pixel 值\n# interpolation\t說明 : [ 放操作的方式 (interpolation) 會影響處理的速度與圖片品質 ]\n# INTER_NEAREST\t-> 最近鄰插值\n# INTER_LINEAR\t-> 雙線性插值(預設)\n# INTER_AREA\t-> 使用像素區域關係進行重採樣。它可能是圖像抽取的首選方法,因為它會產生無雲紋理(波紋)的結果。 但是當圖像縮放時,它類似於INTER_NEAREST方法。\n# INTER_CUBIC\t-> 4x4像素鄰域的雙三次插值\n# INTER_LANCZOS4-> 8x8像素鄰域的Lanczos插值\n# --------------------------------------------- #\n# 檢查原始圖片大小-openCV\nsp = picture.shape # [高|宽|像素值由三种原色构成]\nprint(sp)\n# --------------------------------------------- #\n# 縮放操作 (Scale) - 以不同內插法做縮放\n# 有兩中操作方式\n# cv2.resize(圖,直接定義輸出的大小,哪種運算方法)\n# cv2.resize(圖,None,fx(輸出圖片的比列是原比例的多少倍),fy(輸出圖片的比列是原比例的多少倍),哪種運算方法)\npicture_scale_change1 = 
cv2.resize(picture,(512,512),interpolation=cv2.INTER_NEAREST)\npicture_scale_change2 = cv2.resize(picture,(512,512),interpolation=cv2.INTER_LINEAR)\npicture_scale_change3 = cv2.resize(picture,(512,512),interpolation=cv2.INTER_AREA)\npicture_scale_change4 = cv2.resize(picture,(512,512),interpolation=cv2.INTER_CUBIC)\npicture_scale_change5 = cv2.resize(picture,(512,512),interpolation=cv2.INTER_LANCZOS4)\nimg1_rgb = cv2.cvtColor(picture_scale_change1, cv2.COLOR_BGR2RGB)\nimg2_rgb = cv2.cvtColor(picture_scale_change2, cv2.COLOR_BGR2RGB)\nimg3_rgb = cv2.cvtColor(picture_scale_change3, cv2.COLOR_BGR2RGB)\nimg4_rgb = cv2.cvtColor(picture_scale_change4, cv2.COLOR_BGR2RGB)\nimg5_rgb = cv2.cvtColor(picture_scale_change5, cv2.COLOR_BGR2RGB)\ntitles = ['Original Image', 'INTER_NEAREST', 'INTER_LINEAR', 'INTER_AREA', 'INTER_CUBIC', 'INTER_LANCZOS4']\nimages = [picture, img1_rgb, img2_rgb, img3_rgb, img4_rgb, img5_rgb]\nfor i in range(6):\n plt.subplot(2, 3, i+1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([]), plt.yticks([])\nplt.show()\n# --------------------------------------------- #\n# 如果是要縮小圖片的話,通常 INTER_AREA 使用效果較佳。\n# 如果是要放大圖片的話,通常 INTER_CUBIC 使用效果較佳,次等則是 INTER_LINEAR。\n# 如果要追求速度的話,通常使用 INTER_NEAREST。\n# --------------------------------------------- #\n# INTER_AREA vs. INTER_CUBIC\ntitles123 = [ 'INTER_LINEAR', 'INTER_CUBIC', ]\nimages123 = [ img3_rgb, img4_rgb]\nfor i in range(2):\n plt.subplot(1, 2, i+1)\n plt.imshow(images123[i])\n plt.title(titles123[i])\n plt.xticks([]), plt.yticks([])\nplt.show()\n# 可以明顯看到 INTER_AREA\n# 較模糊且有鋸齒邊緣 (左)\n# -------------------------------------------------------------------------------------------#\n# 法(二) 等比例放大、縮小\n# 將圖片縮小成原本的 20%\n\n# area\nstart_time = time.time() # 計算時間用\nimg_area_scale = cv2.resize(picture, None, fx=1.6, fy=1.6,interpolation=cv2.INTER_AREA)\nprint('INTER_NEAREST zoom cost {}'.format(time.time() - start_time))\n\n# cubic\nstart_time = time.time() # 計算時間用\nimg_cubic_scale = cv2.resize(picture, None, fx=1.6, fy=1.6,interpolation=cv2.INTER_CUBIC)\nprint('INTER_CUBIC zoom cost {}'.format(time.time() - start_time))\nimg5_rgb = cv2.cvtColor(img_area_scale, cv2.COLOR_BGR2RGB)\nimg6_rgb = cv2.cvtColor(img_cubic_scale, cv2.COLOR_BGR2RGB)\nimg_zoom = np.hstack((img_area_scale, img_cubic_scale))\ntitles123 = [ 'INTER_LINEAR', 'INTER_CUBIC', ]\nimages1234 = [img5_rgb,img6_rgb]\nfor i in range(2):\n plt.subplot(1, 2, i+1)\n plt.imshow(images1234[i])\n plt.title(titles123[i])\n plt.xticks([]), plt.yticks([])\nplt.show()\n# --------------------------------------------- #\n# --------------------------------------------------------------------------------------------------------------------- #\n# 平移操作 (Translation Transformation) :\n# 所謂的平移操作的意義為物體往某個向量方向移動,但是其形狀,結構與視角都不發生改變\n# 方法一:手動做 xy 軸的四則運算
取得移動後的位置 (慢)\n# 方法二:以矩陣運算方式操作 (快)\n# 圖片與 Transformation Matrix 相乘\n# 一次性操作就可以得到平移後的值,來得到新的pixel位置\n# --------------------------------------------- #\n# 平移操作的向量移動方式 : (利用這兩個方程式轉成3x3的矩陣)\n# x' = ax+cy+e\n# y' = bx+dy+f\n# x, y 軸的移動不會考慮 y, x 軸的值:c = b = 0\n# x, y 軸的移動不會做 scale:a = d = 1\n# e = x 軸移動多少 pixel\n# f = y 軸移動多少 pixel\n# --------------------------------------------- #\n# OpenCV :\n# 在openCV中,當我們給定不一樣的 Matrix 就可以做不一樣的 Transformation\n# --------------------------------------------- #\n# 用numPy的陣列作為構建基礎,專門用來處理矩陣,它的運算效率比列表更高效。\n# np.array:一種多維陣列物件\n# np.array(a,b) : a * b 矩陣\n# 需要知道你所處理的資料的大致類型是浮點數、複數、���數、布林值、字串,還是普通的 python 對象。當需要控制資料在記憶體和磁片中的存儲方式時,就得瞭解如何控制存儲類型。\n# 所以,dtype(資料類型)是一個特殊的物件,它含有 ndarray 將一塊記憶體解釋為特定資料類型所需的資訊。\n# 故這邊直接使用的是np.float32\n# 定義矩阵 向右平移10个像素, 向下平移50个像素\nM = np.float32([[1, 0, 10], [0, 1, 50]]) #透過 np.array 產生平移矩陣\n# 用OpenCV進行2D變換\n# picture.shape[0-2] : 寬、高、channel\nshifted = cv2.warpAffine(picture, M, (picture.shape[1], picture.shape[0]))\ncv2.imshow('Translation', shifted)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# --------------------------------------------- #\n# 封裝看看:(def)\ndef translate(image, x, y):\n M = np.float32([[1, 0, x], [0, 1, y]])\n shifte = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))\n return shifte\nshifte1 = translate(picture, 10, 30)\ncv2.imshow('Translation', shifte1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# --------------------------------------------- #\n\n\n\n\n\n\n# --------------------------------------------------------------------------------------------------------------------- #\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "pratice/lession4_pratice.py", "file_name": "lession4_pratice.py", "file_ext": "py", "file_size_in_byte": 9076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.imread", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 65, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.INTER_LANCZOS4", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 74, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 
95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 105, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 106, "usage_type": "call"}, {"api_name": "time.time", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 110, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 112, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 161, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "305220058", "text": "import os\nimport pytest\nfrom requre.storage import PersistentObjectStorage\nfrom requre.utils import StorageMode\nfrom requre import RequreTestCase\n\nfrom ogr import PagureService\nfrom ogr.abstract import PRStatus, IssueStatus, CommitStatus\nfrom ogr.exceptions import PagureAPIException, OgrException\n\n\nclass PagureTests(RequreTestCase):\n def setUp(self):\n super().setUp()\n self.token = os.environ.get(\"PAGURE_TOKEN\")\n\n if PersistentObjectStorage().mode == StorageMode.write and (not self.token):\n raise EnvironmentError(\n \"You are in Requre write mode, please set PAGURE_TOKEN 
env variables\"\n )\n\n self.service = PagureService(token=self.token, instance_url=\"https://pagure.io\")\n self._user = None\n self._ogr_project = None\n self._ogr_fork = None\n\n @property\n def user(self):\n if not self._user:\n self._user = self.service.user.get_username()\n return self._user\n\n @property\n def ogr_project(self):\n if not self._ogr_project:\n self._ogr_project = self.service.get_project(\n namespace=None, repo=\"ogr-tests\"\n )\n return self._ogr_project\n\n @property\n def ogr_fork(self):\n if not self._ogr_fork:\n self._ogr_fork = self.service.get_project(\n namespace=None, repo=\"ogr-tests\", username=self.user, is_fork=True\n )\n return self._ogr_fork\n\n\nclass Comments(PagureTests):\n def test_pr_comments(self):\n pr_comments = self.ogr_project.get_pr_comments(pr_id=4)\n assert pr_comments\n print(pr_comments[0].body, pr_comments[1].body, pr_comments[2].body)\n assert len(pr_comments) == 6\n assert pr_comments[0].body.endswith(\"test\")\n\n def test_pr_comments_reversed(self):\n pr_comments = self.ogr_project.get_pr_comments(pr_id=4, reverse=True)\n assert pr_comments\n assert len(pr_comments) == 6\n assert pr_comments[2].body.endswith(\"me\")\n\n def test_pr_comments_filter(self):\n pr_comments = self.ogr_project.get_pr_comments(pr_id=4, filter_regex=\"me\")\n assert pr_comments\n assert len(pr_comments) == 4\n assert pr_comments[0].body == \"ignored comment\"\n\n pr_comments = self.ogr_project.get_pr_comments(\n pr_id=4, filter_regex=\"PR comment [0-9]*\"\n )\n assert pr_comments\n assert len(pr_comments) == 2\n assert pr_comments[0].body.endswith(\"aaaa\")\n\n def test_pr_comments_search(self):\n comment_match = self.ogr_project.search_in_pr(pr_id=4, filter_regex=\"New\")\n assert comment_match\n print(comment_match)\n assert comment_match[0] == \"New\"\n\n comment_match = self.ogr_project.search_in_pr(\n pr_id=4, filter_regex=\"Pull-Request has been merged by [a-z]*\"\n )\n print(comment_match)\n assert comment_match\n assert comment_match[0].startswith(\"Pull\")\n\n\nclass GenericCommands(PagureTests):\n def test_description(self):\n description = self.ogr_project.get_description()\n assert description.startswith(\"Testing repository for python-ogr package\")\n\n def test_branches(self):\n branches = self.ogr_project.get_branches()\n assert branches\n assert set(branches) == {\"master\"}\n\n def test_get_releases(self):\n releases = self.ogr_project.get_releases()\n assert len(releases) == 0\n\n def test_git_urls(self):\n urls = self.ogr_project.get_git_urls()\n assert urls\n assert len(urls) == 2\n assert \"git\" in urls\n assert \"ssh\" in urls\n assert urls[\"git\"] == \"https://pagure.io/ogr-tests.git\"\n assert urls[\"ssh\"].endswith(\"ssh://git@pagure.io/ogr-tests.git\")\n\n def test_username(self):\n # changed to check just lenght, because it is based who regenerated data files\n assert len(self.service.user.get_username()) > 3\n\n def test_get_file(self):\n file_content = self.ogr_project.get_file_content(\"README.rst\")\n assert file_content\n assert isinstance(file_content, str)\n assert \"This is a testing repo\" in file_content\n\n def test_nonexisting_file(self):\n with self.assertRaises(Exception) as _:\n self.ogr_project.get_file_content(\".blablabla_nonexisting_file\")\n\n def test_parent_project(self):\n assert self.ogr_fork.parent.namespace is None\n assert self.ogr_fork.parent.repo == \"ogr-tests\"\n\n def test_commit_statuses(self):\n flags = self.ogr_project.get_commit_statuses(\n commit=\"d87466de81c72231906a6597758f37f28830bb71\"\n 
)\n assert isinstance(flags, list)\n assert len(flags) == 0\n\n def test_get_owners(self):\n owners = self.ogr_fork.get_owners()\n assert [self.user] == owners\n\n def test_pr_permissions(self):\n owners = self.ogr_project.who_can_merge_pr()\n assert \"lachmanfrantisek\" in owners\n assert self.ogr_project.can_merge_pr(\"lachmanfrantisek\")\n\n def test_get_web_url(self):\n url = self.ogr_project.get_web_url()\n assert url == \"https://pagure.io/ogr-tests\"\n\n def test_full_repo_name(self):\n assert self.ogr_project.full_repo_name == \"ogr-tests\"\n assert (\n self.service.get_project(namespace=\"mbi\", repo=\"ansible\").full_repo_name\n == \"mbi/ansible\"\n )\n\n # test forks\n assert self.ogr_fork.full_repo_name == f\"fork/{self.user}/ogr-tests\"\n assert (\n self.service.get_project(\n namespace=\"Fedora-Infra\",\n repo=\"ansible\",\n username=self.user,\n is_fork=True,\n ).full_repo_name\n == f\"fork/{self.user}/Fedora-Infra/ansible\"\n )\n\n\nclass Service(PagureTests):\n def test_project_create(self):\n \"\"\"\n Remove https://pagure.io/$USERNAME/new-ogr-testing-repo before data regeneration\n \"\"\"\n name = \"new-ogr-testing-repo\"\n project = self.service.get_project(repo=name, namespace=None)\n assert not project.exists()\n\n new_project = self.service.project_create(repo=name)\n assert new_project.exists()\n assert new_project.repo == name\n\n project = self.service.get_project(repo=name, namespace=None)\n assert project.exists()\n\n def test_project_create_in_the_group(self):\n \"\"\"\n Remove https://pagure.io/packit-service/new-ogr-testing-repo-in-the-group\n before data regeneration\n \"\"\"\n name = \"new-ogr-testing-repo-in-the-group\"\n namespace = \"packit-service\"\n project = self.service.get_project(repo=name, namespace=namespace)\n assert not project.exists()\n\n new_project = self.service.project_create(repo=name, namespace=namespace)\n assert new_project.exists()\n assert new_project.repo == name\n\n project = self.service.get_project(repo=name, namespace=namespace)\n assert project.exists()\n\n def test_project_create_invalid_namespace(self):\n name = \"new-ogr-testing-repo\"\n namespace = \"nonexisting\"\n\n with pytest.raises(OgrException, match=r\".*Namespace doesn't exist.*\"):\n self.service.project_create(repo=name, namespace=namespace)\n project = self.service.get_project(repo=name, namespace=namespace)\n assert not project.exists()\n\n def test_project_create_unauthorized_namespace(self):\n name = \"new-ogr-testing-repo\"\n namespace = \"fedora-magazine\"\n\n with pytest.raises(\n OgrException, match=r\".*Cannot create project in given namespace.*\"\n ):\n self.service.project_create(repo=name, namespace=namespace)\n project = self.service.get_project(repo=name, namespace=namespace)\n assert not project.exists()\n\n\nclass Issues(PagureTests):\n def setUp(self):\n super().setUp()\n self._long_issues_project = None\n\n @property\n def long_issues_project(self):\n if not self._long_issues_project:\n self._long_issues_project = self.service.get_project(\n repo=\"pagure\", namespace=None\n )\n\n return self._long_issues_project\n\n def test_issue_list(self):\n issue_list = self.ogr_project.get_issue_list()\n assert isinstance(issue_list, list)\n\n issue_list = self.ogr_project.get_issue_list(status=IssueStatus.all)\n assert issue_list\n assert len(issue_list) >= 2\n\n def test_issue_list_paginated(self):\n issue_list = self.long_issues_project.get_issue_list()\n assert issue_list\n assert len(issue_list) >= 400\n\n def test_issue_list_author(self):\n 
issue_list = self.ogr_project.get_issue_list(\n status=IssueStatus.all, author=\"mfocko\"\n )\n assert issue_list\n assert len(issue_list) >= 3\n\n def test_issue_list_nonexisting_author(self):\n issue_list = self.ogr_project.get_issue_list(\n status=IssueStatus.all, author=\"xyzidontexist\"\n )\n assert len(issue_list) == 0\n\n def test_issue_list_assignee(self):\n issue_list = self.ogr_project.get_issue_list(\n status=IssueStatus.all, assignee=\"mfocko\"\n )\n assert issue_list\n assert len(issue_list) == 1\n\n def test_issue_list_labels(self):\n issue_list = self.ogr_project.get_issue_list(\n status=IssueStatus.all, labels=[\"test_label\"]\n )\n assert issue_list\n assert len(issue_list) == 1\n\n\nclass PullRequests(PagureTests):\n def test_pr_create(self):\n pr = self.ogr_fork.pr_create(\n title=\"Testing PR\",\n body=\"Body of the testing PR.\",\n target_branch=\"master\",\n source_branch=\"master\",\n )\n assert pr.title == \"Testing PR\"\n assert pr.description == \"Body of the testing PR.\"\n assert pr.target_branch == \"master\"\n assert pr.source_branch == \"master\"\n assert pr.status == PRStatus.open\n\n def test_pr_list(self):\n pr_list_default = self.ogr_project.get_pr_list()\n assert isinstance(pr_list_default, list)\n\n pr_list = self.ogr_project.get_pr_list(status=PRStatus.all)\n assert pr_list\n assert len(pr_list) >= 2\n\n assert len(pr_list_default) < len(pr_list)\n\n def test_pr_info(self):\n pr_info = self.ogr_project.get_pr_info(pr_id=5)\n assert pr_info\n assert pr_info.title.startswith(\"Test PR\")\n assert pr_info.description.endswith(\"merged prs\")\n assert pr_info.status == PRStatus.merged\n assert pr_info.url == \"https://pagure.io/ogr-tests/pull-request/5\"\n assert (\n pr_info.diff_url\n == \"https://pagure.io/ogr-tests/pull-request/5#request_diff\"\n )\n assert pr_info.head_commit == \"517121273b142293807606dbd7a2e0f514b21cc8\"\n\n\nclass Forks(PagureTests):\n def test_fork(self):\n assert self.ogr_fork.exists()\n assert self.ogr_fork.is_fork\n fork_description = self.ogr_fork.get_description()\n assert fork_description\n a = self.ogr_fork.parent\n assert a\n is_forked = a.is_forked()\n assert is_forked and isinstance(is_forked, bool)\n fork = a.get_fork(create=False)\n assert fork\n assert fork.is_fork\n urls = fork.get_git_urls()\n assert \"{username}\" not in urls[\"ssh\"]\n\n def test_nonexisting_fork(self):\n ogr_project_non_existing_fork = self.service.get_project(\n namespace=None,\n repo=\"ogr-tests\",\n username=\"qwertzuiopasdfghjkl\",\n is_fork=True,\n )\n assert not ogr_project_non_existing_fork.exists()\n with self.assertRaises(PagureAPIException) as ex:\n ogr_project_non_existing_fork.get_description()\n assert \"Project not found\" in ex.exception.pagure_error\n\n def test_fork_property(self):\n fork = self.ogr_project.get_fork()\n assert fork\n assert fork.get_description()\n\n def test_create_fork(self):\n \"\"\"\n Remove your fork of ogr-tests https://pagure.io/fork/$USER/ogr-tests\n before regeneration data.\n But other tests needs to have already existed user fork.\n So regenerate data for other tests, remove data file for this test\n and regenerate it again.\n \"\"\"\n not_existing_fork = self.ogr_project.get_fork(create=False)\n assert not not_existing_fork\n assert not self.ogr_project.is_forked()\n\n old_forks = self.ogr_project.service.user.get_forks()\n\n self.ogr_project.fork_create()\n\n assert self.ogr_project.get_fork().exists()\n assert self.ogr_project.is_forked()\n\n new_forks = self.ogr_project.service.user.get_forks()\n 
assert len(old_forks) == len(new_forks) - 1\n\n\nclass PagureProjectTokenCommands(PagureTests):\n def setUp(self):\n super().setUp()\n self.token = os.environ.get(\"PAGURE_OGR_TEST_TOKEN\", \"\")\n\n if PersistentObjectStorage().mode == StorageMode.write and (not self.token):\n raise EnvironmentError(\"please set PAGURE_OGR_TEST_TOKEN env variables\")\n\n self.service = PagureService(token=self.token, instance_url=\"https://pagure.io\")\n self._user = None\n self._ogr_project = None\n self._ogr_fork = None\n\n def test_issue_permissions(self):\n owners = self.ogr_project.who_can_close_issue()\n assert \"lachmanfrantisek\" in owners\n\n issue = self.ogr_project.get_issue_info(2)\n assert self.ogr_project.can_close_issue(\"lachmanfrantisek\", issue)\n\n def test_issue_comments(self):\n issue_comments = self.ogr_project._get_all_issue_comments(issue_id=3)\n assert issue_comments\n assert len(issue_comments) == 4\n assert issue_comments[0].body.startswith(\"test\")\n assert issue_comments[1].body.startswith(\"tests\")\n\n def test_issue_info(self):\n issue_info = self.ogr_project.get_issue_info(issue_id=2)\n assert issue_info\n assert issue_info.title.startswith(\"Test 1\")\n assert issue_info.status == IssueStatus.closed\n\n def test_issue_comments_reversed(self):\n issue_comments = self.ogr_project.get_issue_comments(issue_id=3, reverse=True)\n assert len(issue_comments) == 4\n assert issue_comments[0].body.startswith(\"regex\")\n\n def test_issue_comments_regex(self):\n issue_comments = self.ogr_project.get_issue_comments(\n issue_id=3, filter_regex=\"regex\"\n )\n assert len(issue_comments) == 2\n assert issue_comments[0].body.startswith(\"let's\")\n\n def test_issue_comments_regex_reversed(self):\n issue_comments = self.ogr_project.get_issue_comments(\n issue_id=3, filter_regex=\"regex\", reverse=True\n )\n assert len(issue_comments) == 2\n assert issue_comments[0].body.startswith(\"regex\")\n\n def test_update_pr_info(self):\n pr_info = self.ogr_project.get_pr_info(pr_id=4)\n orig_title = pr_info.title\n orig_description = pr_info.description\n\n self.ogr_project.update_pr_info(\n pr_id=4, title=\"changed\", description=\"changed description\"\n )\n pr_info = self.ogr_project.get_pr_info(pr_id=4)\n assert pr_info.title == \"changed\"\n assert pr_info.description == \"changed description\"\n\n self.ogr_project.update_pr_info(\n pr_id=4, title=orig_title, description=orig_description\n )\n pr_info = self.ogr_project.get_pr_info(pr_id=4)\n assert pr_info.title == orig_title\n assert pr_info.description == orig_description\n\n def test_pr_setters(self):\n pr = self.ogr_project.get_pr(pr_id=6)\n\n old_title = pr.title\n pr.title = \"test title\"\n assert pr.title != old_title\n assert pr.title == \"test title\"\n\n pr.title = old_title\n assert pr.title == old_title\n\n old_description = pr.description\n pr.description = \"test description\"\n assert pr.description != old_description\n assert pr.description == \"test description\"\n\n pr.description = old_description\n assert pr.description == old_description\n\n def test_pr_comments_author_regex(self):\n comments = self.ogr_project.get_pr_comments(\n pr_id=4, filter_regex=\"^regex\", author=\"mfocko\"\n )\n assert len(comments) == 1\n assert comments[0].body.endswith(\"test\")\n\n def test_pr_comments_author(self):\n comments = self.ogr_project.get_pr_comments(pr_id=4, author=\"lachmanfrantisek\")\n assert len(comments) == 0\n\n def test_issue_comments_author_regex(self):\n comments = self.ogr_project.get_issue_comments(\n issue_id=3, 
filter_regex=\"^test[s]?$\", author=\"mfocko\"\n )\n assert len(comments) == 2\n assert comments[0].body == \"test\"\n assert comments[1].body == \"tests\"\n\n def test_issue_comments_author(self):\n comments = self.ogr_project.get_issue_comments(\n issue_id=3, author=\"lachmanfrantisek\"\n )\n assert len(comments) == 0\n\n def test_pr_status(self):\n self.ogr_project.set_commit_status(\n commit=\"360928f7ca08827e8e17cb26851ea57e8d197f87\",\n state=CommitStatus.success,\n target_url=\"https://pagure.io/ogr-tests/pull-request/4\",\n description=\"not failed test\",\n context=\"test\",\n )\n pr = self.ogr_project.get_pr(pr_id=4)\n\n statuses = pr.get_statuses()\n assert statuses\n assert len(statuses) >= 0\n assert statuses[-1].state == CommitStatus.success\n\n def test_is_private(self):\n self.service.instance_url = \"https://src.fedoraproject.org\"\n assert not self.ogr_project.is_private()\n\n def test_token_is_none_then_set(self):\n token = self.service._token\n self.service.change_token(\"\")\n try:\n with pytest.raises(PagureAPIException) as exc:\n self.service.user.get_username()\n assert \"Invalid or expired token\" in str(exc)\n finally:\n self.service.change_token(token)\n\n self.service.user.get_username()\n self.service.user.get_username() # 2nd identical call\n", "sub_path": "tests/integration/test_pagure.py", "file_name": "test_pagure.py", "file_ext": "py", "file_size_in_byte": 18236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "requre.RequreTestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requre.storage.PersistentObjectStorage", "line_number": 17, "usage_type": "call"}, {"api_name": "requre.utils.StorageMode.write", "line_number": 17, "usage_type": "attribute"}, {"api_name": "requre.utils.StorageMode", "line_number": 17, "usage_type": "name"}, {"api_name": "ogr.PagureService", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 209, "usage_type": "call"}, {"api_name": "ogr.exceptions.OgrException", "line_number": 209, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 218, "usage_type": "call"}, {"api_name": "ogr.exceptions.OgrException", "line_number": 219, "usage_type": "argument"}, {"api_name": "ogr.abstract.IssueStatus.all", "line_number": 244, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 244, "usage_type": "name"}, {"api_name": "ogr.abstract.IssueStatus.all", "line_number": 255, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 255, "usage_type": "name"}, {"api_name": "ogr.abstract.IssueStatus.all", "line_number": 262, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 262, "usage_type": "name"}, {"api_name": "ogr.abstract.IssueStatus.all", "line_number": 268, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 268, "usage_type": "name"}, {"api_name": "ogr.abstract.IssueStatus.all", "line_number": 275, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 275, "usage_type": "name"}, {"api_name": "ogr.abstract.PRStatus.open", "line_number": 293, "usage_type": "attribute"}, {"api_name": "ogr.abstract.PRStatus", "line_number": 293, "usage_type": "name"}, {"api_name": "ogr.abstract.PRStatus.all", "line_number": 299, 
"usage_type": "attribute"}, {"api_name": "ogr.abstract.PRStatus", "line_number": 299, "usage_type": "name"}, {"api_name": "ogr.abstract.PRStatus.merged", "line_number": 310, "usage_type": "attribute"}, {"api_name": "ogr.abstract.PRStatus", "line_number": 310, "usage_type": "name"}, {"api_name": "ogr.exceptions.PagureAPIException", "line_number": 343, "usage_type": "argument"}, {"api_name": "os.environ.get", "line_number": 378, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 378, "usage_type": "attribute"}, {"api_name": "requre.storage.PersistentObjectStorage", "line_number": 380, "usage_type": "call"}, {"api_name": "requre.utils.StorageMode.write", "line_number": 380, "usage_type": "attribute"}, {"api_name": "requre.utils.StorageMode", "line_number": 380, "usage_type": "name"}, {"api_name": "ogr.PagureService", "line_number": 383, "usage_type": "call"}, {"api_name": "ogr.abstract.IssueStatus.closed", "line_number": 406, "usage_type": "attribute"}, {"api_name": "ogr.abstract.IssueStatus", "line_number": 406, "usage_type": "name"}, {"api_name": "ogr.abstract.CommitStatus.success", "line_number": 493, "usage_type": "attribute"}, {"api_name": "ogr.abstract.CommitStatus", "line_number": 493, "usage_type": "name"}, {"api_name": "ogr.abstract.CommitStatus.success", "line_number": 503, "usage_type": "attribute"}, {"api_name": "ogr.abstract.CommitStatus", "line_number": 503, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 513, "usage_type": "call"}, {"api_name": "ogr.exceptions.PagureAPIException", "line_number": 513, "usage_type": "argument"}]} +{"seq_id": "304421328", "text": "import numpy as np\nimport numpy.fft as fft\nimport sys\nimport librosa\nfrom scipy import *\nimport os\n\n\n\n\ndef hps(signal, steps, fft_length):\n f_signal = fft.fft(signal, fft_length)\n f_signal = f_signal[:len(f_signal) // 2]\n f_signal = np.abs(f_signal)\n\n result_length = len(f_signal) // steps\n result = f_signal[:result_length].copy()\n\n for i in range(2, steps + 1):\n result = result * f_signal[::i][:result_length]\n\n return result\n\n\ndef divide(signal, piece_length, overlap_length):\n d = piece_length - overlap_length\n pieces = []\n for i in range(0, len(signal) - piece_length + 1, d):\n pieces.append(signal[i:i + piece_length])\n return pieces\n\n\ndef window(pieces):\n windowed_pieces = []\n for ch in pieces:\n windowed_pieces.append(ch * np.hamming(len(ch)))\n return windowed_pieces\n\n\ndef find_fundamental_frequency(hps_result, sample_rate, signal_length):\n min_f = 85\n start_i = int((min_f / sample_rate) * signal_length)\n\n mx_i = start_i\n for i in range(start_i + 1, len(hps_result)):\n if hps_result[i] > hps_result[mx_i]:\n mx_i = i\n\n return (mx_i / signal_length) * sample_rate\n\n\ndef recognize_gender(file):\n signal, w = librosa.load(file)\n\n signal = signal.astype(float) / 2 ** 16\n w = float(w)\n\n piece_length = 16 * 1024\n overlap_length = piece_length // 2\n hps_steps = 4\n fft_length = 4 * piece_length\n\n pieces = divide(signal, piece_length, overlap_length)\n pieces = window(pieces)\n\n frequencies = []\n for p in pieces:\n hps_result = hps(p, hps_steps, fft_length)\n frequencies.append(find_fundamental_frequency(hps_result, w, fft_length))\n fr = np.median(frequencies)\n if fr < 165:\n return 'M'\n else:\n return 'K'\n\n\ndef main():\n files = sys.argv[1:]\n for i in files:\n recognize_gender(i)\n\n if (len(files) > 0):\n return\n\n countFiles = 0\n correctRecognition = 0\n for filename in os.listdir('train'):\n filePath = os.path.join('train', 
filename)\n\n if filename.endswith(\".wav\"):\n\n\n countFiles = countFiles + 1\n recognition = recognize_gender(filePath)\n\n if (recognition == filename[4]):\n correctRecognition = correctRecognition + 1\n #else:\n #print(\"This file is not correctly recognized: \",filePath)\n\n print(filePath, recognition)\n\n print('correctly recognized:', correctRecognition, 'All:', countFiles, 'percentage recognized:',correctRecognition / countFiles * 100)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "recognize_gender.py", "file_name": "recognize_gender.py", "file_ext": "py", "file_size_in_byte": 2605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.fft.fft", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.hamming", "line_number": 36, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "163639718", "text": "import glob\nimport logging\nimport os\nimport yaml\n\nlog = logging.getLogger('base')\n\n\nclass YamlLoader:\n\n @staticmethod\n def _get_name_of_dictionary(path_to_file):\n # get full file name from array - ex. # machine.yaml\n full_file_name = path_to_file.split('/')[-1] # get full file name\n return full_file_name.split('.')[0] # return the first element of split by '.'\n\n def _get_dictionary(self, path_to_file):\n with open(path_to_file, 'r') as stream:\n try:\n file_name = self._get_name_of_dictionary(path_to_file)\n dictionary = {file_name: yaml.load(stream, Loader=yaml.FullLoader)}\n except yaml.YAMLError as exc:\n self.union_raise_log(f'{exc}')\n finally:\n log.info(f'Yaml file - {path_to_file} is successfully read!')\n\n return dictionary\n\n def get_set_dictionaries(self, path_to_dir):\n file_names = []\n all_dicts = {}\n try:\n file_names = [file for file in os.listdir(path_to_dir) if\n file.endswith(\".yaml\")] # cross platform getting file_names from dir\n except Exception as exc:\n self.union_raise_log(f'{exc}')\n finally:\n log.info('All yaml files are ready to read!')\n\n for file_name in file_names:\n yaml_path = path_to_dir + '/' + file_name\n one_dict = self._get_dictionary(yaml_path)\n try:\n all_dicts.update(one_dict)\n except Exception as exc:\n self.union_raise_log(f'{exc}')\n\n return all_dicts\n\n # execute raise and logging with string\n @staticmethod\n def union_raise_log(string):\n log.error(string)\n raise (Exception(string))\n", "sub_path": "lib/helpers/yaml_loader.py", "file_name": "yaml_loader.py", "file_ext": "py", "file_size_in_byte": 1798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 21, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 21, "usage_type": "attribute"}, {"api_name": "yaml.YAMLError", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}]} 
+{"seq_id": "222238032", "text": "from sklearn.metrics import precision_score, recall_score, f1_score\nimport pandas as pd\nimport joblib\n\ntest_path = './data/test.txt'\ntest_feature_path = './data/test.feature.txt'\n\n#データ読み込み\nnames = ['TITLE', 'CATEGORY']\nX_test = pd.read_csv(test_feature_path, sep='\\t', header=None)\ntest_df = pd.read_csv(test_path, sep='\\t', header=None, names=names)\nY_test = test_df['CATEGORY']\n\n#モデル読み込み\nmodel = joblib.load('model.joblib')\n\n#適合率、再現率、F!スコアの計算\nY_pred = model.predict(X_test)\n\nprecision = precision_score(Y_test, Y_pred, average=None)\nprecision_mi = precision_score(Y_test, Y_pred, average='micro')\nprecision_ma = precision_score(Y_test, Y_pred, average='macro')\n\nrecall = recall_score(Y_test, Y_pred, average=None)\nrecall_mi = recall_score(Y_test, Y_pred, average='micro')\nrecall_ma = recall_score(Y_test, Y_pred, average='macro')\n\nf1 = f1_score(Y_test, Y_pred, average=None)\nf1_mi = f1_score(Y_test, Y_pred, average='micro')\nf1_ma = f1_score(Y_test, Y_pred, average='macro')\n\nprint(f'適合率: {precision}\\tマイクロ平均: {precision_mi}\\tマクロ平均: {precision_ma}')\nprint(f'再現率: {recall}\\tマイクロ平均: {recall_mi}\\tマクロ平均: {recall_ma}')\nprint(f'F1スコア: {f1}\\tマイクロ平均: {f1_mi}\\tマクロ平均: {f1_ma}')", "sub_path": "masamune/chapter06/knock56.py", "file_name": "knock56.py", "file_ext": "py", "file_size_in_byte": 1311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "167221556", "text": "#!/usr/bin/env python\nimport tweepy, sys, os\nimport matplotlib.pyplot as plt\nfrom nltk.tokenize import TweetTokenizer\nfrom termcolor import colored\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import stopwords as stopwordsNLTK\nimport spacy\nimport nltk\nimport pandas as pd\nimport regex as re\nimport itertools\nimport collections\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nimport random\nimport numpy as np\nimport pandas as pd\nfrom os import path\nfrom PIL import Image\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nfrom wordcloud import WordCloud, STOPWORDS, 
ImageColorGenerator\nimport matplotlib.pyplot as plt\nfrom matplotlib import interactive\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport arabic_reshaper # this was missing in your code\nfrom bidi.algorithm import get_display\nimport tkinter as tk\nimport tkinter.scrolledtext as tkscrolled\nfrom tkinter import *\n\n\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\n# Implement the default Matplotlib key bindings.\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\n\n\n\n\n\n\ndef plotthem(data1,data2):\n plt.figure(1)\n plt.imshow(data1, interpolation='bilinear')\n plt.axis(\"off\")\n plt.gcf().canvas.draw()\n\n\n positive = data2[0]\n negative = data2[1]\n\n labels = ['Positive [' + str(positive) + '%]', 'Negative [' + str(negative) + '%]']\n sizes = [positive, negative]\n colors = ['yellowgreen', 'red']\n\n plt.figure(2)\n patches, texts = plt.pie(sizes, colors=colors, startangle=90)\n plt.legend(patches, labels, loc=\"best\")\n plt.title(\" by analyzing \" + str(noOfSearchTerms) + \" Tweets.\")\n plt.axis('equal')\n plt.tight_layout()\n plt.gcf().canvas.draw()\n\n '''\n plt.figure(2)\n plt.clf()\n x = np.arange(0.0,3.0,0.01)\n y = np.tan(2*np.pi*x+random.random())\n plt.plot(x,y)\n plt.gcf().canvas.draw()\n\n '''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# plot function is created for\n# plotting the graph in\n# tkinter window\ndef plot(data1):\n # the figure that will contain the plot\n\n fig = plt.figure(1)\n #fig = plt.subplot(2, 1, 1)\n plt.imshow(data1, interpolation='bilinear')\n plt.axis(\"off\")\n\n # adding the subplot\n #plot1 = fig.add_subplot(111)\n\n # creating the Tkinter canvas\n # containing the Matplotlib figure\n canvas = FigureCanvasTkAgg(fig,\n master=root)\n\n\n canvas.draw()\n\n # placing the canvas on the Tkinter window\n canvas.get_tk_widget().pack()\n\n\n\n\ndef percentage(part, whole):\n return 100 * float(part) / float(whole)\n\n##################### MACHINE LEARNING PART\n\ndef read_tsv(data_file):\n text_data = list()\n labels = list()\n infile = open(data_file, encoding='utf-8')\n for line in infile:\n if not line.strip():\n continue\n label, text = line.split('\\t')\n text_data.append(text)\n labels.append(label)\n return text_data, labels\n\n\ndef classify(input_text, classifier, x_train, y_train):\n\n pipeline = Pipeline([\n ('vect', TfidfVectorizer(min_df=0.0001, max_df=0.95,\n analyzer='word', lowercase=False,\n )),\n ('clf', classifier),\n ])\n\n pipeline.fit(x_train, y_train)\n feature_names = pipeline.named_steps['vect'].get_feature_names()\n\n y_predicted = pipeline.predict(input_text)\n\n return y_predicted, feature_names\n\ndef load(pos_train_file, neg_train_file, pos_test_file, neg_test_file):\n pos_train_data, pos_train_labels = read_tsv(pos_train_file)\n neg_train_data, neg_train_labels = read_tsv(neg_train_file)\n\n pos_test_data, pos_test_labels = read_tsv(pos_test_file)\n neg_test_data, neg_test_labels = read_tsv(neg_test_file)\n\n x_train = pos_train_data + neg_train_data\n y_train = pos_train_labels + neg_train_labels\n\n x_test = pos_test_data + neg_test_data\n y_test = pos_test_labels + neg_test_labels\n\n return x_train, y_train, x_test, y_test\n\n##################### DATA ANALYTICS PART\n\n\ndef show_home_tweets_to_txt(api,n):\n for status in tweepy.Cursor(api.home_timeline).items(n):\n print(status._json)\n\n\n\ndef show_world_locations(api):\n # Get all the locations where Twitter provides trends service\n worldTrendsLocations = 
api.trends_available()\n print(worldTrendsLocations)\n\n\n# country is constant per region\n# links for all the region:\n# world wide id = 1\n# Egypt id = 28584965\ndef trends_for_region_to_txt(region_id):\n f = open(\"trendings.txt\", \"w+\", encoding=\"utf-8\")\n trends = api.trends_place(id=region_id)\n\n width, height = 50, 20\n TKScrollTXT = tkscrolled.ScrolledText(root, width=width, height=height, wrap='word')\n TKScrollTXT.pack()\n\n\n for value in trends:\n for trend in value['trends']:\n f.write(\"This is a trend -> \" + trend['name'] + \"\\n\")\n f.write(\"-------------------- \\n\")\n\n TKScrollTXT.insert(INSERT, trend['name'] + \"\\n\")\n\n TKScrollTXT.config(state=DISABLED)\n\n f.close()\n\n\n# the past few weeks of the tweets\ndef tweets_of_topic_to_txt(api,searchTerm,date,noOfSearchTerms=100, lang=\"ar\"):\n tweets = api.search(q=searchTerm, date=date, lang=lang, count=noOfSearchTerms, tweet_mode=\"extended\")\n return tweets\n\n\ndef tweeters_of_topic_to_txt(topic_tweets):\n f = open(\"tweeters.txt\", \"w+\", encoding=\"utf-8\")\n users_locs = [[tweet.user.screen_name, tweet.user.location] for tweet in topic_tweets]\n\n for value in users_locs:\n f.write(\"The user name: \" + value[0] + \" -. [region]: \" + value[1] + \"\\n\")\n f.write(\"-------------------- \\n\")\n\n f.close()\n\ndef tweets_to_txt(tweets):\n f = open(\"tweets.txt\", \"w+\", encoding=\"utf-8\")\n for tweet in tweets:\n tweet_line = tweet.full_text.replace(\"\\n\",\" \")\n f.write( tweet_line + \"\\n\")\n #f.write(\"-------------------- \\n\")\n\n f.close()\n\ndef word_freq(tweets):\n f = open(\"word_freq.txt\", \"w+\", encoding=\"utf-8\")\n words_in_tweet = [tweet.full_text.split() for tweet in tweets]\n words_in_tweet_strings = \" \"\n\n for tweet in tweets:\n words_in_tweet_strings += tweet.full_text\n\n # List of all words across tweets\n all_words_all_tweets = list(itertools.chain(*words_in_tweet))\n # Create counter\n counts = collections.Counter(all_words_all_tweets)\n\n for value in counts.most_common(20):\n f.write(\"The word '\" + value[0] + \"' is repeated -> \" + str(value[1]) + \"\\n\")\n f.write(\"-------------------- \\n\")\n f.close()\n\n\ndef visualization(tweets,y_predicted,noOfSearchTerms):\n # Create and generate a word cloud image:\n words_in_tweet_strings = \" \"\n\n weridPatterns = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u'\\U00010000-\\U0010ffff'\n u\"\\u200d\"\n u\"\\u2640-\\u2642\"\n u\"\\u2600-\\u2B55\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\u3030\"\n u\"\\ufe0f\"\n u\"\\u2069\"\n u\"\\u2066\"\n u\"\\u200c\"\n u\"\\u2068\"\n u\"\\u2067\"\n \"]+\", flags=re.UNICODE)\n\n for tweet in tweets:\n words_in_tweet_strings += tweet.full_text\n\n clean_text = weridPatterns.sub(r'', words_in_tweet_strings)\n\n data = arabic_reshaper.reshape(clean_text)\n data = get_display(data) # add this line\n wordcloud = WordCloud(font_path='arial', background_color='white',\n mode='RGB', width=2000, height=1000).generate(data)\n\n\n ####### Visualization\n n_all = len(y_predicted)\n n_pos = 0\n n_neg = 0\n\n for y in y_predicted:\n if y == 'pos':\n n_pos += 1\n\n if y == 'neg':\n n_neg += 1\n\n positive = percentage(n_pos, n_all)\n negative = percentage(n_neg, n_all)\n\n positive = format(positive,'.2f')\n negative = 
format(negative, '.2f')\n\n plotthem(wordcloud,[positive,negative])\n\n\n##############\n\nintro = '''\n___ _ _ _ _ ___ ___ ____ ____ \n | | | | | | | |___ |__/ \n | |_|_| | | | |___ | \\ \n'''\n\nprint(colored(intro, 'blue'))\n\n\nconsumerKey = \"T2jXUHLe4dSvtlmZYuoZH6yeA\"\nconsumerSecret = \"jetuh1jZaXGTHrZ0hFMkAGm695SRc6EzX6Rw6dyUqsdnuPlzYC\"\naccessToken = \"1285916588697362435-liwPeMLnktBFrzwnIX7IPlekJhPj8f\"\naccessTokenSecret = \"tsyyuNVO82bzqx5WAsBdo8es0WWtXCXuWOZtzo11f23HA\"\n\n\nauth = tweepy.OAuthHandler(consumerKey, consumerSecret)\nauth.set_access_token(accessToken, accessTokenSecret)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\nsearchTerm =\"\"\nnoOfSearchTerms = 10\n#searchTerm = input(\"Enter keyword/hashtag to search about: \")\n#noOfSearchTerms = int(input(\"Enter how many tweets to analyze: \"))\n#searchTerm = searchTerm + \" -filter:retweets\"\ndate_since = \"2020-8-15\"\n\n\n\n\n\n\n\n\ndef analyze():\n # init figures\n fig1 = plt.figure()\n canvas1 = FigureCanvasTkAgg(fig1, master=root)\n canvas1.get_tk_widget().pack()\n\n fig2 = plt.figure()\n canvas2 = FigureCanvasTkAgg(fig2, master=root)\n canvas2.get_tk_widget().pack()\n\n # timeline\n # show_home_tweets_to_txt(api,10)\n\n # tweets of a given hashtag\n tweets = tweets_of_topic_to_txt(api, searchTerm, date_since)\n tweets_to_txt(tweets)\n\n # tweeters of certain topic\n tweeters_of_topic_to_txt(tweets)\n\n # word freq\n word_freq(tweets)\n\n # visualization\n\n pos_training = 'input/train_Arabic_tweets_positive_20190413.tsv'\n neg_training = 'input/train_Arabic_tweets_negative_20190413.tsv'\n\n pos_testing = 'input/test_Arabic_tweets_positive_20190413.tsv'\n neg_testing = 'input/test_Arabic_tweets_negative_20190413.tsv'\n\n # sample tweets from text file now\n with open('tweets.txt', encoding=\"utf8\") as f:\n input_tweets = [line.rstrip() for line in f]\n\n # available classifiers:\n '''\n LinearSVC(), SVC(), MultinomialNB(),\n BernoulliNB(), SGDClassifier(), DecisionTreeClassifier(max_depth=5),\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n KNeighborsClassifier(3)\n '''\n\n width, height = 70, 20\n out = tkscrolled.ScrolledText(root, width=width, height=height, wrap='word')\n out.place(x=40, y=570)\n\n\n f = open(\"tweets_classified.txt\", \"w+\", encoding=\"utf-8\")\n x_train, y_train, x_test, y_test = load(pos_training, neg_training, pos_testing, neg_testing)\n y_predicted, feature_names = classify(input_tweets, BernoulliNB(), x_train, y_train)\n for i in range(len(y_predicted)):\n f.write(str(i) + \"-Tweet: \" + input_tweets[i] + \" ---> \" + y_predicted[i] + \"\\n\")\n f.write(\"-------------------- \\n\")\n\n out.insert(INSERT, input_tweets[i] + \" ---> \" + y_predicted[i] + \"\\n\")\n out.insert(INSERT, \"=====================================\" + \"\\n\")\n\n out.config(state=DISABLED)\n out.xview_moveto(1)\n f.close()\n\n\n visualization(tweets, y_predicted, noOfSearchTerms)\n\n\ndef getData():\n global searchTerm\n global noOfSearchTerms\n x1 = entry1.get()\n x2 = entry2.get()\n searchTerm = x1\n noOfSearchTerms = int(x2)\n analyze()\n\n\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n\n\nroot = tk.Tk()\nroot.title('TWITTER ANALYSIS')\nroot.geometry(\"2000x1000\") #Width x Height\n\n\n\nwt =Label(root,text=\"Egypt trends\")\nwt.pack()\n\n# show hashtags for a given region\n# show_world_locations(api)\ntrends_for_region_to_txt(23424802) # woeid for Egypt\n\n\ncanvas1 = tk.Canvas(root, width=400, height=100)\ncanvas1.pack()\n\n\n\n\nkeyword = Label(root,\n text=\"keyword\").place(x = 
600,\n y = 375)\n\n\n\nnumber = Label(root,\n text=\"number\").place(x = 600,\n y = 405)\n\n\n\nentry1 = tk.Entry(root)\ncanvas1.create_window(200, 40, window=entry1)\n\nentry2 = tk.Entry(root)\ncanvas1.create_window(200, 70, window=entry2)\n\nbutton1 = tk.Button(text='Analyze', command=getData)\ncanvas1.create_window(200, 95, window=button1)\nsearchTerm = searchTerm + \" -filter:retweets\"\n#date_since = \"2020-8-15\"\n\n\noutput_text = Label(root,\n text=\"output tweets:\").place(x = 40,\n y = 540)\n\n\nroot.mainloop()\n#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 13262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "warnings.filterwarnings", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 148, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 180, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 200, "usage_type": "call"}, {"api_name": 
"tkinter.scrolledtext", "line_number": 200, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 250, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 252, "usage_type": "call"}, {"api_name": "regex.compile", "line_number": 264, "usage_type": "call"}, {"api_name": "regex.UNICODE", "line_number": 286, "usage_type": "attribute"}, {"api_name": "arabic_reshaper.reshape", "line_number": 293, "usage_type": "call"}, {"api_name": "bidi.algorithm.get_display", "line_number": 294, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 295, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 328, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 337, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 357, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 362, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext.ScrolledText", "line_number": 399, "usage_type": "call"}, {"api_name": "tkinter.scrolledtext", "line_number": 399, "usage_type": "name"}, {"api_name": "sklearn.naive_bayes.BernoulliNB", "line_number": 405, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 435, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 449, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 467, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 470, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 473, "usage_type": "call"}]} +{"seq_id": "564577622", "text": "from sklearn.linear_model import LinearRegression as SKLearnLinearRegression\nfrom ..data_models.forecast import Prediction, RaceForecast\nfrom ..data_models.util import TrainingSet\nfrom sklearn.preprocessing import PolynomialFeatures\nimport numpy as np\n\n\nclass LinearRegression:\n def __init__(self, horses, distance, track_type):\n \"\"\"\n :param fixture: List contains races which contains Fixture model\n \"\"\"\n self.horses = horses\n self.distance = distance\n self.track_type = track_type\n\n def forecast(self):\n predictions = list()\n for result in self.horses:\n horse_results = result.past_results\n if horse_results:\n training_set = TrainingSet(horse_results, self.track_type)\n if training_set.validate():\n machine = SKLearnLinearRegression()\n machine.fit(training_set.x, training_set.y)\n\n prediction = machine.predict(self.distance)[0]\n\n predictions.append(Prediction(horse_id=result.horse_id,\n horse_name=result.horse_name,\n prediction=prediction,\n result_count=\"{0}/{1}\".format(len(training_set.x), len(horse_results))))\n\n return RaceForecast('Linear Regression', predictions)\n\n\nclass PolynomialRegression:\n def __init__(self, horses, distance, track_type):\n \"\"\"\n :param fixture: List contains races which contains Fixture model\n \"\"\"\n self.horses = horses\n self.distance = distance\n self.track_type = track_type\n\n def forecast(self):\n predictions = list()\n for horse in self.horses:\n horse_results = horse.past_results\n if horse_results:\n 
training_set = TrainingSet(horse_results, self.track_type)\n\n if training_set.validate():\n poly_reg = PolynomialFeatures(degree=2)\n X_poly = poly_reg.fit_transform(training_set.x)\n\n machine = SKLearnLinearRegression()\n machine.fit(X_poly, training_set.y)\n\n try:\n per = poly_reg.fit_transform(self.distance)\n prediction = machine.predict(per)[0]\n\n predictions.append(Prediction(horse_id=horse.horse_id,\n horse_name=horse.horse_name,\n prediction=prediction,\n result_count=\"{0}/{1}\".format(len(training_set.x),\n len(horse_results))))\n except ValueError as error:\n logger.info(\"{0}:{1} failed to get a prediction from {2}, error: {3}\".format(horse.horse_id,\n horse.horse_name,\n type(self),\n error))\n return RaceForecast('Polynomial Regression', predictions)\n\n @property\n def best_worst(self):\n \"\"\"\n Add the best + worst records from all the horses' past results (results in the same distance and track type\n of the race) that are going to participate in the race, then divide that value by two.\n :return: Best and worst time from all horses divided by two\n \"\"\"\n filtered_results = [result for result in self.get_past_results_of_participant_horses()]\n\n filtered_results.sort(key=lambda x: x.time_as_seconds)\n\n best = filtered_results[0].time_as_seconds\n worst = filtered_results[-1].time_as_seconds\n\n return (best + worst) / 2\n\n @property\n def mean(self):\n return np.mean([result.time_as_seconds for result in self.get_past_results_of_participant_horses()])\n\n def get_past_results_of_participant_horses(self):\n rtn_results = []\n for horse in self.horses:\n rtn_results += horse.past_results\n return rtn_results\n\n def boost(self, training_set, strategy_property):\n \"\"\"\n If the horse never ran on the distance +/-100 than we add a phantom record calculated with either mean or \n best_worst depending on the strategy_property.\n\n :param strategy_property: PolynomialRegression.mean or PolynomialRegression.best_worst\n :param training_set: horsing_around.forecaster.data_models.util.TrainingSet object\n :return: boosted horsing_around.forecaster.data_models.util.TrainingSet object\n \"\"\"\n different_distanced_results = [r.distance for r in training_set.x if r.distance != self.distance]\n\n if not different_distanced_results:\n raise Exception('')\n # This horse is running this distance officially first time in his life therefore we boost\n training_set.append(strategy_property, self.distance)\n\n def result_matches_this_race(self, result):\n return self.track_type in result.track_type and self.distance == result.distance\n", "sub_path": "horsing_around/forecaster/regression/regressors.py", "file_name": "regressors.py", "file_ext": "py", "file_size_in_byte": 5390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "data_models.util.TrainingSet", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 24, "usage_type": "call"}, {"api_name": "data_models.forecast.Prediction", "line_number": 29, "usage_type": "call"}, {"api_name": "data_models.forecast.RaceForecast", "line_number": 34, "usage_type": "call"}, {"api_name": "data_models.util.TrainingSet", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 57, "usage_type": "call"}, {"api_name": "data_models.forecast.Prediction", 
"line_number": 64, "usage_type": "call"}, {"api_name": "data_models.forecast.RaceForecast", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "224608245", "text": "import os\nimport sys\nimport time\nimport shlex\nimport pathlib\nimport subprocess\n\n# Target will be a board, \"test\", \"docs\", \"mpy-cross-mac\", or \"windows\"\nTARGET = sys.argv[1]\n\n# Submodules needed by port builds outside of their ports directory.\n# Should we try and detect these?\nPORT_DEPS = {\n \"atmel-samd\": [\n \"extmod/ulab/\",\n \"lib/adafruit_floppy/\",\n \"lib/mp3/\",\n \"lib/protomatter/\",\n \"lib/quirc/\",\n \"lib/tinyusb/\",\n \"data/nvm.toml/\",\n ],\n \"broadcom\": [\"extmod/ulab/\", \"lib/tinyusb/\"],\n \"cxd56\": [\"extmod/ulab/\", \"lib/tinyusb/\"],\n \"espressif\": [\n \"extmod/ulab/\",\n \"lib/certificates/nina-fw/\",\n \"lib/protomatter/\",\n \"lib/quirc/\",\n \"lib/tinyusb/\",\n ],\n \"litex\": [\"extmod/ulab/\", \"lib/tinyusb/\"],\n \"mimxrt10xx\": [\"extmod/ulab/\", \"lib/tinyusb/\", \"data/nvm.toml/\"],\n \"nrf\": [\"extmod/ulab/\", \"lib/mp3/\", \"lib/protomatter/\", \"lib/tinyusb/\", \"data/nvm.toml/\"],\n \"raspberrypi\": [\n \"extmod/ulab/\",\n \"lib/adafruit_floppy/\",\n \"lib/mbedtls/\",\n \"lib/mp3/\",\n \"lib/certificates/nina-fw/\",\n \"lib/protomatter/\",\n \"lib/quirc/\",\n \"lib/tinyusb/\",\n \"data/nvm.toml/\",\n ],\n \"silabs\": [\"extmod/ulab/\", \"data/nvm.toml/\"],\n \"stm\": [\"extmod/ulab/\", \"lib/mp3/\", \"lib/protomatter/\", \"lib/tinyusb/\", \"data/nvm.toml/\"]\n # omit unix which is part of the \"test\" target below\n}\n\n\ndef run(title, command, check=True):\n print(\"::group::\" + title, flush=True)\n print(command, flush=True)\n start = time.monotonic()\n try:\n subprocess.run(shlex.split(command), stderr=subprocess.STDOUT, check=check)\n finally:\n print(\"::endgroup::\", flush=True)\n print(\"Duration:\", time.monotonic() - start, flush=True)\n\n\ndef set_output(name, value):\n if \"GITHUB_OUTPUT\" in os.environ:\n with open(os.environ[\"GITHUB_OUTPUT\"], \"at\") as f:\n print(f\"{name}={value}\", file=f)\n else:\n print(f\"Would set GitHub actions output {name} to '{value}'\")\n\n\ndef main():\n submodules = []\n submodules_tags = []\n\n print(\"Target:\", TARGET)\n\n if TARGET == \"scheduler\":\n # submodules = [\"tools/\"]\n submodules = [\"extmod/ulab\", \"lib/\", \"tools/\"]\n elif TARGET == \"tests\":\n submodules = [\"extmod/ulab\", \"lib/\", \"tools/\"]\n elif TARGET == \"docs\":\n # used in .readthedocs.yml to generate RTD\n submodules = [\"extmod/ulab\"]\n submodules_tags = [\"frozen/\"]\n elif TARGET == \"mpy-cross\" or TARGET == \"mpy-cross-mac\":\n submodules = [\"tools/\"] # for huffman\n elif TARGET == \"windows\":\n # This builds one board from a number of ports so fill out a bunch of submodules\n for port in (\"atmel-samd\", \"nrf\", \"raspberrypi\", \"stm\"):\n submodules.append(f\"ports/{port}\")\n submodules.extend(PORT_DEPS[port])\n unique_submodules = set(submodules)\n submodules = list(unique_submodules)\n elif TARGET == \"website\":\n submodules = [\"tools/adabot/\"]\n submodules_tags = [\"frozen/\"]\n elif TARGET == \"pre-commit\":\n submodules = [\"extmod/ulab\"]\n else:\n p = list(pathlib.Path(\".\").glob(f\"ports/*/boards/{TARGET}/mpconfigboard.mk\"))\n if not p:\n raise RuntimeError(f\"Unsupported target: {TARGET}\")\n\n config = p[0]\n # Add the ports folder to init submodules\n port_folder = config.parents[2]\n port = port_folder.name\n 
submodules.append(str(port_folder))\n submodules.append(\"tools/\") # for huffman\n submodules.extend(PORT_DEPS[port])\n with config.open() as f:\n for line in f.readlines():\n prefix = \"FROZEN_MPY_DIRS += $(TOP)/\"\n if line.startswith(prefix):\n lib_folder = line.strip()[len(prefix) :]\n # Drop everything after the second folder because the frozen\n # folder may be inside the submodule.\n if lib_folder.count(\"/\") > 1:\n lib_folder = lib_folder.split(\"/\", maxsplit=2)\n lib_folder = \"/\".join(lib_folder[:2])\n submodules_tags.append(lib_folder)\n\n print(\"Submodule tags[Y]:\", submodules_tags)\n print(\"Submodule tags[N]:\", submodules)\n\n if submodules_tags:\n run(\n \"Init the submodules with tags\",\n f\"git submodule update --init {' '.join(submodules_tags)}\",\n )\n\n if submodules:\n run(\n \"Init the submodules without tags\",\n f\"git submodule update --init --depth=1 {' '.join(submodules)}\",\n )\n\n for submodule in submodules_tags:\n if submodule.startswith(\"frozen\"):\n set_output(\"frozen_tags\", True)\n break\n else:\n set_output(\"frozen_tags\", False)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "tools/ci_fetch_deps.py", "file_name": "ci_fetch_deps.py", "file_ext": "py", "file_size_in_byte": 4868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "time.monotonic", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 57, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 57, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "time.monotonic", "line_number": 60, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "517792554", "text": "import time\nimport typing\n\nimport services\nfrom NeonOcean.Order import Debug, Director, Mods, This\nfrom NeonOcean.Order.Interactions.Support import RegistrationShared\nfrom NeonOcean.Order.Tools import Exceptions\nfrom objects import script_object\nfrom sims4 import resources\nfrom sims4.tuning import instance_manager\n\nclass RegistrationHandler:\n\tHost = This.Mod # type: Mods.Mod\n\n\t_registeredInteractions = list() # type: typing.List[typing.Type[RegistrationShared.RegistrationExtensionAbstract]]\n\t_typeDeterminers = dict() # type: typing.Dict[typing.Callable, str]\n\n\tdef __init_subclass__ (cls, **kwargs):\n\t\tcls._registeredInteractions = list()\n\t\tcls._typeDeterminers = dict()\n\n\t@classmethod\n\tdef HandleInteraction (cls, interactionReference: typing.Type[RegistrationShared.RegistrationExtensionAbstract]) -> None:\n\t\t\"\"\"\n\t\tRegister an interaction to be handled by this handler.\n\t\t:param interactionReference:\n\t\t:return:\n\t\t\"\"\"\n\n\t\tif not isinstance(interactionReference, type):\n\t\t\traise Exceptions.IncorrectTypeException(interactionReference, \"interactionReference\", (type,))\n\n\t\tif not issubclass(interactionReference, RegistrationShared.RegistrationExtensionAbstract):\n\t\t\traise Exception(\"Interaction does not inherit the registration extension\")\n\n\t\tif interactionReference in cls._registeredInteractions:\n\t\t\treturn\n\n\t\tcls._registeredInteractions.append(interactionReference)\n\n\t@classmethod\n\tdef 
StopHandlingInteraction (cls, interactionReference: typing.Type[RegistrationShared.RegistrationExtensionAbstract]) -> None:\n\t\tif not interactionReference in cls._registeredInteractions:\n\t\t\treturn\n\n\t\tcls._registeredInteractions.remove(interactionReference)\n\n\t@classmethod\n\tdef RegisterTypeDeterminer (cls, typeDeterminer: typing.Callable[[typing.Type[script_object.ScriptObject]], bool], typeIdentifier: str) -> None:\n\t\t\"\"\"\n\t\tRegister a method to determine the type of an object.\n\t\t:param typeDeterminer: A type determiner, this should take the object reference and give a boolean indicating whether or not it is of that type. This should be\n\t\tunique as type determiners are stored in a dictionary by this value.\n\t\t:param typeIdentifier: The type identifier, used to signal to interactions what type of object it is. Non built-in type determiners should have unique\n\t\tidentifiers to avoid conflicts. Case does not matter for type identifier strings.\n\t\t\"\"\"\n\n\t\tif typeDeterminer in cls._typeDeterminers:\n\t\t\treturn\n\n\t\tcls._typeDeterminers[typeDeterminer] = typeIdentifier.lower()\n\n\t@classmethod\n\tdef UnregisterTypeDeterminer (cls, typeDeterminer: typing.Callable[[typing.Type[script_object.ScriptObject]], bool]) -> None:\n\t\t\"\"\"\n\t\tUnregister a object type determiner.\n\t\t:param typeDeterminer: The type determiner to be removed from the list.\n\t\t\"\"\"\n\n\t\tcls._typeDeterminers.pop(typeDeterminer, None)\n\n\t@classmethod\n\tdef RegisterAllInteractions (cls) -> None:\n\t\t\"\"\"\n\t\tRegister all interactions handled by this handler.\n\t\t\"\"\"\n\n\t\tcls.RegisterInteractions(cls._registeredInteractions)\n\n\t@classmethod\n\tdef RegisterInteractions (cls, interactions: typing.List[typing.Type[RegistrationShared.RegistrationExtensionAbstract]]) -> None:\n\t\t\"\"\"\n\t\tRegister a list of interactions.\n\t\t\"\"\"\n\n\t\toperationStartTime = time.time() # type: float\n\n\t\t# noinspection PyProtectedMember\n\t\tobjectReferences = services.get_instance_manager(resources.Types.OBJECT)._tuned_classes.values() # type: typing.List[typing.Type[script_object.ScriptObject]]\n\n\t\tguidTargets = dict() # type: typing.Dict[int, list]\n\t\ttypeTargets = dict() # type: typing.Dict[str, list]\n\n\t\tfor interactionReference in interactions: # type: typing.Type[RegistrationShared.RegistrationExtensionAbstract]\n\t\t\tfor relevantGUID in interactionReference.GetRelevantObjectGUIDs(): # type: int\n\t\t\t\tif relevantGUID not in guidTargets:\n\t\t\t\t\tguidTargets[relevantGUID] = [interactionReference]\n\t\t\t\telse:\n\t\t\t\t\tguidTargets[relevantGUID].append(interactionReference)\n\n\t\t\tfor relevantType in interactionReference.GetRelevantObjectTypes(): # type: str\n\t\t\t\trelevantType = relevantType.lower()\n\n\t\t\t\tif relevantType not in typeTargets:\n\t\t\t\t\ttypeTargets[relevantType] = [interactionReference]\n\t\t\t\telse:\n\t\t\t\t\ttypeTargets[relevantType].append(interactionReference)\n\n\t\trelevantTypeDeterminers = dict() # type: typing.Dict[typing.Callable, str]\n\n\t\tfor typeDeterminer, typeIdentifier in cls._typeDeterminers.items(): # type: typing.Callable, str\n\t\t\ttypeIdentifier = typeIdentifier.lower()\n\n\t\t\tif typeIdentifier in typeTargets:\n\t\t\t\trelevantTypeDeterminers[typeDeterminer] = typeIdentifier\n\n\t\tshouldTypeCheck = bool(relevantTypeDeterminers) # type: bool\n\n\t\tfor objectReference in objectReferences: # type: typing.Type[script_object.ScriptObject]\n\t\t\tobjectDeterminedTypes = list() # type: 
typing.List[str]\n\n\t\t\tif shouldTypeCheck:\n\t\t\t\tfor typeDeterminer, typeIdentifier in relevantTypeDeterminers.items(): # type: typing.Callable, str\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif typeDeterminer(objectReference):\n\t\t\t\t\t\t\tobjectDeterminedTypes.append(typeIdentifier)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tDebug.Log(\"Type determiner failed to determine if an object matches the type identifier '\" + typeIdentifier + \"'.\\n\" + str(objectReference), cls.Host.Namespace, Debug.LogLevels.Exception, group = cls.Host.Namespace, owner = __name__)\n\n\t\t\tif objectReference.guid in guidTargets:\n\t\t\t\tfor interactionReference in guidTargets[objectReference.guid]:\n\t\t\t\t\tinteractionReference.RegisterObject(objectReference)\n\n\t\t\tfor objectDeterminedType in objectDeterminedTypes: # type: str\n\t\t\t\tif objectDeterminedType in typeTargets:\n\t\t\t\t\tfor interactionReference in typeTargets[objectDeterminedType]:\n\t\t\t\t\t\tinteractionReference.RegisterObject(objectReference)\n\n\t\toperationTime = time.time() - operationStartTime\n\t\tDebug.Log(\"Finished Registering %s interactions in %s seconds with %s game objects existing.\" % (len(interactions), operationTime, len(objectReferences)), cls.Host.Namespace, Debug.LogLevels.Info, group = cls.Host.Namespace, owner = __name__)\n\nclass _Announcer(Director.Announcer):\n\t_level = 500 # type: int\n\n\t@classmethod\n\tdef InstanceManagerOnStart (cls, instanceManager: instance_manager.InstanceManager):\n\t\tif instanceManager.TYPE != resources.Types.OBJECT:\n\t\t\treturn\n\n\t\tRegistrationHandler.RegisterAllInteractions()\n\ndef RegisterInteractionToAll (interactionReference: typing.Type) -> None:\n\t\"\"\"\n\tAttach an interaction to every object in the game.\n\t:param interactionReference: The interaction, this doesn't need to inherit the RegistrationExtension class to work.\n\t\"\"\"\n\n\t# noinspection PyProtectedMember\n\tobjectReferences = services.get_instance_manager(resources.Types.OBJECT)._tuned_classes.values() # type: typing.List[typing.Type[script_object.ScriptObject]]\n\n\tfor objectReference in objectReferences: # type: typing.Type[script_object.ScriptObject]\n\t\tRegisterInteraction(interactionReference, objectReference)\n\n# noinspection PyProtectedMember\ndef RegisterInteraction (interactionReference: typing.Type, objectReference: typing.Type[script_object.ScriptObject]) -> None:\n\t\"\"\"\n\tAttach an interaction to a specific object. 
If the interaction is already attached nothing will happen.\n\t:param interactionReference: The interaction, this doesn't need to inherit the RegistrationExtension class to work.\n\t:param objectReference: The script object that the interaction is meant to be attached to.\n\t\"\"\"\n\n\tif interactionReference in objectReference._super_affordances:\n\t\treturn\n\n\tobjectReference._super_affordances += (interactionReference,)\n", "sub_path": "Python/NeonOcean.Order/NeonOcean/Order/Interactions/Support/RegistrationHandler.py", "file_name": "RegistrationHandler.py", "file_ext": "py", "file_size_in_byte": 7415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "NeonOcean.Order.This.Mod", "line_number": 13, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.This", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 23, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared.RegistrationExtensionAbstract", "line_number": 23, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared", "line_number": 23, "usage_type": "name"}, {"api_name": "NeonOcean.Order.Tools.Exceptions.IncorrectTypeException", "line_number": 31, "usage_type": "call"}, {"api_name": "NeonOcean.Order.Tools.Exceptions", "line_number": 31, "usage_type": "name"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared.RegistrationExtensionAbstract", "line_number": 33, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 42, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared.RegistrationExtensionAbstract", "line_number": 42, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 49, "usage_type": "attribute"}, {"api_name": "objects.script_object.ScriptObject", "line_number": 49, "usage_type": "attribute"}, {"api_name": "objects.script_object", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 64, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 64, "usage_type": "attribute"}, {"api_name": "objects.script_object.ScriptObject", "line_number": 64, "usage_type": "attribute"}, {"api_name": "objects.script_object", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 81, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 81, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared.RegistrationExtensionAbstract", "line_number": 81, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Interactions.Support.RegistrationShared", "line_number": 81, "usage_type": "name"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "services.get_instance_manager", "line_number": 89, "usage_type": "call"}, {"api_name": "sims4.resources.Types", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sims4.resources", "line_number": 89, "usage_type": "name"}, {"api_name": "NeonOcean.Order.Debug.Log", "line_number": 128, "usage_type": "call"}, {"api_name": "NeonOcean.Order.Debug", 
"line_number": 128, "usage_type": "name"}, {"api_name": "NeonOcean.Order.Debug.LogLevels", "line_number": 128, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "NeonOcean.Order.Debug.Log", "line_number": 140, "usage_type": "call"}, {"api_name": "NeonOcean.Order.Debug", "line_number": 140, "usage_type": "name"}, {"api_name": "NeonOcean.Order.Debug.LogLevels", "line_number": 140, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Director.Announcer", "line_number": 142, "usage_type": "attribute"}, {"api_name": "NeonOcean.Order.Director", "line_number": 142, "usage_type": "name"}, {"api_name": "sims4.tuning.instance_manager.InstanceManager", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sims4.tuning.instance_manager", "line_number": 146, "usage_type": "name"}, {"api_name": "sims4.resources.Types", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sims4.resources", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 152, "usage_type": "attribute"}, {"api_name": "services.get_instance_manager", "line_number": 159, "usage_type": "call"}, {"api_name": "sims4.resources.Types", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sims4.resources", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 165, "usage_type": "attribute"}, {"api_name": "objects.script_object.ScriptObject", "line_number": 165, "usage_type": "attribute"}, {"api_name": "objects.script_object", "line_number": 165, "usage_type": "name"}]} +{"seq_id": "367734436", "text": "from app import app, db\nfrom flask import render_template, flash, redirect, url_for, request, abort, jsonify, render_template_string\nimport os.path\nfrom app.forms import LoginForm, RegistrationForm, CommentForm, SearchForm\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.models import User\nfrom datetime import datetime\nfrom app.forms import EditProfileForm, PostForm, AvaEdit\nimport os\nfrom hashlib import md5\n\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nwsgi_app = app.wsgi_app\n\ndef avatar_path_to(profile_name):\n avatars_path = 'C:/flask_second/app/static/avatars'\n\n avs = os.listdir(avatars_path)\n av_path = ''\n\n for el in avs:\n if profile_name == el.split('.')[0]:\n av_path += '/static/avatars/' + str(el)\n break\n\n return av_path\n\n\n@app.route('/')\n@app.route('/main', methods = [\"POST\", \"GET\"])\ndef index():\n form = SearchForm()\n dict_albs = {}\n\n if form.validate_on_submit():\n query_str = form.data['search']\n print(query_str)\n if query_str != '':\n with open(\"C:/flask_second/app/static/sites/titles.txt\") as f:\n vnl = f.readlines()\n vnl1 = []\n\n for elem in vnl:\n print(elem.split(';')[0].lower())\n if query_str.lower() in elem.split(';')[0].lower() or query_str.lower() in elem.split(';')[1].lower():\n print(elem)\n vnl1.append(elem)\n\n counter = 1\n\n if vnl1 == []:\n print('fsdjfkdsf')\n\n for vinils in vnl1:\n try:\n struct = vinils.split(\";\")\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2]}})\n counter += 1\n\n keys = list(dict_albs.keys())\n\n #print(dict_albs)\n #print(keys)\n #print(dict_albs[1]['cover'].strip().split('/')[1].split('.')[0])\n\n return render_template('main page.html', albums=dict_albs, keys=keys, 
lenalbs=len(keys), form = form)\n\n with open(\"C:/flask_second/app/static/sites/titles.txt\") as f:\n vnl = f.readlines()\n counter = 1\n\n for vinils in vnl:\n try:\n struct = vinils.split(\";\")\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2]}})\n counter += 1\n\n keys = list(dict_albs.keys())\n\n print(dict_albs)\n print(keys)\n print(dict_albs[1]['cover'].strip().split('/')[1].split('.')[0])\n\n return render_template('main page.html', albums = dict_albs, keys = keys, lenalbs = len(keys), form = form)\n\n@app.route('/', methods = [\"GET\", \"POST\"])\ndef pageSite(alb_name):\n global ids\n bool_cont = 0\n if current_user.is_authenticated == False:\n email = 'bogdanos@gmail.com'\n digest = md5(email.lower().encode('utf-8')).hexdigest()\n ava_not_auth = 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, 128)\n else:\n ava_not_auth = ''\n\n form = CommentForm()\n if form.validate_on_submit():\n comment_str = form.comment.data\n author = current_user.username\n time_post = str(datetime.utcnow())\n time_post_str = time_post.split('.')[0]\n\n try:\n with open(\"c:/flask_second/app/static/sites/\"+str(alb_name)+\"/comments/comments.txt\", 'a') as f:\n #if ava == '':\n #f.write(str(author)+\";\"+time_post_str+\";\"+ava+' '+\";\"+comment_str+\"\\n\")\n #else:\n f.write(str(author) + \";\" + time_post_str + \";\" + \";\" + comment_str + \"\\n\")\n #path_ava = avatar_path_to(str(author))\n #if path_ava != '':\n #path_ava = \"c:/flask_second/app\" + path_ava\n #f.write(str(author) + \";\" + time_post_str + \";\" + path_ava +\";\" + comment_str + \"\\n\")\n\n except: pass\n\n dict_albs = {}\n with open(r\"C:/flask_second/app/static/sites/titles.txt\") as f:\n vnl = f.readlines()\n counter = 1\n\n for vinils in vnl:\n try:\n struct = vinils.split(\";\")\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n try:\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2], 'label': struct[3], 'genre': struct[4], 'info': struct[5]}})\n except:\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2], 'label': struct[3], 'genre': struct[4], 'info': ''}})\n counter += 1\n\n keys_alb = list(dict_albs.keys())\n\n comments = {}\n try:\n with open(\"c:/flask_second/app/static/sites/\"+str(alb_name)+\"/comments/comments.txt\") as f:\n cmt = f.readlines()\n counter = 1\n\n for comment in cmt:\n if counter == 4:\n bool_cont = 1\n break\n\n try:\n struct = comment.split(';')\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n comments.update({counter: {'user': struct[0], 'date': struct[1], \"cover\": struct[2], 'comment': struct[3]}})\n counter += 1\n\n bool_cont = 0\n except: pass\n\n keys = list(comments.keys())\n\n ids = 1\n\n #if len(alb_name.split('.')) == 2:\n for key in keys_alb:\n try:\n if dict_albs[key]['cover'].strip('\\n').split('/')[1].split('.')[0] == alb_name:\n ids = key\n break\n except:\n continue\n\n return render_template('album page.html', id_alb = ids, ava_not_auth = ava_not_auth, form = form, comments = comments, keys = keys, albums = dict_albs, albs_keys = keys_alb, bool_cont = bool_cont)\n\n'''@app.route('/', methods = [\"GET\", \"POST\"])\ndef pageSite(alb_name):\n 
print('Opened')\n global ids\n if current_user.is_authenticated == False:\n email = 'bogdanos@gmail.com'\n digest = md5(email.lower().encode('utf-8')).hexdigest()\n ava_not_auth = 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(digest, 128)\n else:\n ava_not_auth = ''\n\n form = CommentForm()\n if form.validate_on_submit():\n comment_str = form.comment.data\n author = current_user.username\n time_post = str(datetime.utcnow())\n time_post_str = time_post.split('.')[0]\n\n try:\n with open(\"c:/flask_second/app/static/sites/\"+str(alb_name)+\"/comments/comments.txt\", 'a') as f:\n #if ava == '':\n #f.write(str(author)+\";\"+time_post_str+\";\"+ava+' '+\";\"+comment_str+\"\\n\")\n #else:\n f.write(str(author) + \";\" + time_post_str + \";\" + \";\" + comment_str + \"\\n\")\n #path_ava = avatar_path_to(str(author))\n #if path_ava != '':\n #path_ava = \"c:/flask_second/app\" + path_ava\n #f.write(str(author) + \";\" + time_post_str + \";\" + path_ava +\";\" + comment_str + \"\\n\")\n\n except: pass\n\n dict_albs = {}\n with open(r\"C:/flask_second/app/static/sites/titles.txt\") as f:\n vnl = f.readlines()\n counter = 1\n\n for vinils in vnl:\n try:\n struct = vinils.split(\";\")\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2]}})\n counter += 1\n\n keys_alb = list(dict_albs.keys())\n\n comments = {}\n try:\n with open(\"c:/flask_second/app/static/sites/\"+str(alb_name)+\"/comments/comments.txt\") as f:\n cmt = f.readlines()\n counter = 1\n\n for comment in cmt:\n try:\n struct = comment.split(';')\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n comments.update({counter: {'user': struct[0], 'date': struct[1], \"cover\": struct[2], 'comment': struct[3]}})\n counter += 1\n except: pass\n\n keys = list(comments.keys())\n\n ids = 1\n\n #if len(alb_name.split('.')) == 2:\n for key in keys_alb:\n if dict_albs[key]['cover'].strip('\\n').split('/')[1].split('.')[0] == alb_name:\n ids = key\n break\n\n return render_template('album page.html', id_alb = ids, ava_not_auth = ava_not_auth, form = form, comments = comments, keys = keys, albums = dict_albs, albs_keys = keys_alb)'''\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n@app.route('/login', methods = ['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n #if user is None or not user.check_password(form.password.data) or re.match(r'(?=.*[0-9])(?=.*[!@#$%^&*])(?=.*[a-z])(?=.*[A-Z])[0-9a-zA-Z!@#$%^&*]{8,}', form.password.data):\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n flash('Login requested for user {}, remember_me={}'.format(form.username.data, form.remember_me.data))\n return redirect(url_for('index'))\n\n return render_template('login page.html', form = form)\n #return render_template('login page.html')\n\n'''@app.route('/login_ajax', methods = [\"POST\"])\ndef login_ajax():\n usrn = request.form.get('lg')\n pss = request.form.get('ps')\n\n user = User.query.filter_by(username=usrn).first()\n if user is None or not 
user.check_password(pss):\n pass\n else:\n login_user(user)\n return redirect(url_for('index'))\n\n return jsonify(username = usrn)'''\n\n@app.route('/register', methods = ['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('login'))\n return render_template('register page.html', form = form)\n\n@app.route('/user/')\n@login_required\ndef user(username):\n av_path = avatar_path_to(username)\n\n user = User.query.filter_by(username = username).first_or_404()\n if user is None:\n abort(404)\n return render_template('user page.html', user = user, ava = av_path)\n\n@app.route('/edit_profile/', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n av_path = avatar_path_to(current_user.username)\n\n form = EditProfileForm()\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.about_me = form.about_me.data\n db.session.commit()\n flash('Your changes have been saved.')\n return redirect(url_for('index'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.about_me.data = current_user.about_me\n return render_template('edit user page.html', title='Edit Profile', form=form, ava = av_path)\n\n# додавання фото\n@app.route('/upload_photo', methods = [\"POST\"])\ndef upload_photo():\n target = os.path.join(APP_ROOT, r'\\flask_second\\app\\static\\avatars')\n\n '''print(target)\n if not os.path.isdir(target):\n os.mkdir(target)\n else:\n print(\"Couldn't create upload directory: {}\".format(target))'''\n\n print(request.files[\"file\"])\n upload = request.files[\"file\"]\n print(upload)\n print(\"{} is the file name\".format(upload.filename))\n name_f = upload.filename.split('.')[1]\n filename = str(current_user.username) + \".\" + name_f\n files_list = []\n\n for files_f in os.listdir(target):\n files_list.append(files_f.split('.')[0])\n if current_user.username in files_list:\n str_removing_path = target + '/'+str(os.listdir(target)[files_list.index(current_user.username)])\n print(str_removing_path)\n os.remove(str_removing_path)\n\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n\n upload.save(destination)\n\n return redirect(url_for('edit_profile'))\n\n@app.route('/ajax1', methods = [\"POST\"])\ndef ajax_request():\n username = []\n date = []\n comment_txt = []\n\n rfg = str(request.form.get('site_src')).split('/')[3]\n\n with open(\"c:/flask_second/app/static/sites/\" + rfg + \"/comments/comments.txt\") as f:\n fs = f.readlines()\n for line in fs[3::]:\n struct = line.strip('\\n').split(';')\n print(struct)\n username.append(struct[0])\n date.append(struct[1])\n comment_txt.append(struct[3])\n\n return jsonify(\n username = username,\n date = date,\n comment_txt = comment_txt\n )\n\n@app.route('/post', methods = ['GET', 'POST'])\ndef post_page():\n av_path = avatar_path_to(current_user.username)\n\n form = PostForm()\n if form.validate_on_submit():\n album_name_str = form.album_name.data\n artist_name_str = form.artist_name.data\n label_name_str = form.label_name.data\n genre_name_str = form.genre_name.data\n about_album_str = form.about_album.data\n\n gener_site_name = 
\"{}_{}\".format(artist_name_str[0:3], album_name_str[0:3])\n #image_path_str = gener_site_name\n\n target = os.path.join(APP_ROOT, r'\\flask_second\\app\\static\\sites\\{}'.format(gener_site_name))\n if os.path.exists(target) == False:\n os.mkdir(target)\n os.mkdir(target+'\\comments')\n\n with open(target + '\\comments\\comments.txt', 'w') as ff:\n pass\n\n data = '{};{};{};{};{};{}'.format(album_name_str, artist_name_str, gener_site_name, label_name_str, genre_name_str, about_album_str)\n #os.mkdir(target)\n\n with open(r\"C:/flask_second/app/static/sites/titles.txt\", 'a') as f:\n f.write(data+'\\n')\n\n flash('Your changes have been saved.')\n return redirect(url_for(\"upld\"))\n\n elif request.method == 'GET':\n form.album_name.data = ''\n form.artist_name.data = ''\n form.label_name.data = ''\n form.genre_name.data = ''\n form.about_album.data = ''\n return render_template('post page.html', title='Edit Profile', form=form, ava = av_path)\n\n@app.route('/upld', methods = [\"GET\", \"POST\"])\ndef upld():\n form = AvaEdit()\n\n if form.validate_on_submit():\n return redirect(url_for('index'))\n\n return render_template('upload cover photo.html', form = form)\n\n# додавання заставки\n@app.route('/upload_cover_photo', methods = [\"POST\"])\ndef upload_cover_photo():\n with open(r\"C:/flask_second/app/static/sites/titles.txt\") as f:\n lists = f.readlines()\n names_al = lists[-1].split(';')[2].split('/')[0]\n\n print(names_al)\n\n target = os.path.join(APP_ROOT, r'\\flask_second\\app\\static\\sites\\{}'.format(names_al))\n\n print(target)\n if not os.path.isdir(target):\n os.mkdir(target)\n else:\n print(\"Couldn't create upload directory: {}\".format(target))\n\n print(request.files[\"file\"])\n upload = request.files[\"file\"]\n print(upload)\n print(\"{} is the file name\".format(upload.filename))\n name_f = upload.filename.split('.')[1]\n filename = str(names_al) + \".\" + name_f\n files_list = []\n\n for files_f in os.listdir(target):\n files_list.append(files_f.split('.')[0])\n\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n\n upload.save(destination)\n\n titles_text = []\n\n with open('C:/flask_second/app/static/sites/titles.txt') as f:\n albums_cols = f.readlines()\n for elem in albums_cols:\n titles_text.append(elem)\n\n print(titles_text)\n\n with open('C:/flask_second/app/static/sites/titles.txt', 'w') as f:\n new_new = []\n for elem in titles_text:\n elem = elem.split(';')\n if elem[2] == names_al:\n new_elem = names_al + '/' + filename\n elem[2] = new_elem\n f.write(';'.join(elem))\n\n return redirect(url_for('index'))\n\n@app.route('/about')\ndef about_page():\n return render_template('about page.html')\n\n'''@app.route('/ajax', methods = [\"POST\"])\ndef ajax_request():\n counter_comment = 3\n\n print(request.form['username'])\n print(request.form['site_src'])\n with open(\"c:/flask_second/app/static/sites/\"+str(request.form['site_src'].split('/')[2])+\"/comments/comments.txt\") as f:\n f.read()\n\n username = '232'\n date = '23^'\n comment_txt = \"ewe\"\n\n labels = {\n username: username,\n date: date,\n comment_txt: comment_txt\n }\n\n\n return jsonify(\n html_ajax_part = html_ajax_part\n )'''\n\n'''@app.route('/?search=', methods=['GET', 'POST'])\ndef search_by_name(name):\n with open(r\"C:/flask_second/app/static/sites/titles.txt\") as f:\n lines = f.readlines()\n\n for line in lines:\n line = lines.split(';')[0]\n\n return redirect(url_for('index'))'''\n\n'''@app.route('/search', 
methods = [\"POST\", \"GET\"])\ndef search():\n form = SearchForm()\n dict_albs = {}\n\n if form.validate_on_submit():\n query_str = form.data['search']\n print(query_str)\n if query_str != '':\n with open(\"C:/flask_second/app/static/sites/titles.txt\") as f:\n vnl = f.readlines()\n vnl1 = []\n\n for elem in vnl:\n print(elem.split(';')[0].lower())\n if query_str.lower() in elem.split(';')[0].lower() or query_str.lower() in elem.split(';')[1].lower():\n print(elem)\n vnl1.append(elem)\n\n counter = 1\n\n if vnl1 == []:\n print('fsdjfkdsf')\n\n for vinils in vnl1:\n try:\n struct = vinils.split(\";\")\n except:\n continue\n\n path_to = r'C:/flask_second/app/static/sites/{}'.format(struct[2])\n\n if os.path.exists(path_to.strip()):\n dict_albs.update({counter: {'name': struct[1], 'album': struct[0], \"cover\": struct[2]}})\n counter += 1\n\n keys = list(dict_albs.keys())\n\n #print(dict_albs)\n #print(keys)\n #print(dict_albs[1]['cover'].strip().split('/')[1].split('.')[0])\n\n form.reset()\n\n return render_template('search.html', albums=dict_albs, keys=keys, lenalbs=len(keys), form = form)\n\n form.reset()\n keys = {}\n return render_template('search.html', albums=dict_albs, keys=keys, lenalbs=len(keys), form = form)'''\n\n\n@app.before_request\ndef before_request():\n if current_user.is_authenticated and current_user.is_banned:\n return \"You banned\"\n elif current_user.is_authenticated:\n #current_user.last_seen = str(datetime.utcnow()).split('.')[0]\n current_user.last_seen = datetime.utcnow()\n db.session.commit()\n", "sub_path": "app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 20383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "app.app.wsgi_app", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 14, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "app.forms.SearchForm", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 97, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 30, "usage_type": "call"}, {"api_name": "app.app", "line_number": 30, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 31, "usage_type": "call"}, {"api_name": "app.app", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 103, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 105, "usage_type": "call"}, {"api_name": "app.forms.CommentForm", "line_number": 110, "usage_type": "call"}, {"api_name": "flask_login.current_user.username", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 113, "usage_type": "name"}, {"api_name": 
"datetime.datetime.utcnow", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 190, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 99, "usage_type": "call"}, {"api_name": "app.app", "line_number": 99, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 276, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 273, "usage_type": "call"}, {"api_name": "app.app", "line_number": 273, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 280, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 280, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 281, "usage_type": "call"}, {"api_name": "app.forms.LoginForm", "line_number": 283, "usage_type": "call"}, {"api_name": "app.models.User.query.filter_by", "line_number": 286, "usage_type": "call"}, {"api_name": "app.models.User.query", "line_number": 286, "usage_type": "attribute"}, {"api_name": "app.models.User", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 289, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 290, "usage_type": "call"}, {"api_name": "flask_login.login_user", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 295, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 278, "usage_type": "call"}, {"api_name": "app.app", "line_number": 278, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 314, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 314, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 315, "usage_type": "call"}, {"api_name": "app.forms.RegistrationForm", "line_number": 316, "usage_type": "call"}, {"api_name": "app.models.User", "line_number": 318, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 320, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 320, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 320, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 321, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 321, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 321, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 323, "usage_type": "call"}, {"api_name": 
"flask.url_for", "line_number": 323, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 324, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 312, "usage_type": "call"}, {"api_name": "app.app", "line_number": 312, "usage_type": "name"}, {"api_name": "app.models.User.query.filter_by", "line_number": 331, "usage_type": "call"}, {"api_name": "app.models.User.query", "line_number": 331, "usage_type": "attribute"}, {"api_name": "app.models.User", "line_number": 331, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 333, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 334, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 326, "usage_type": "call"}, {"api_name": "app.app", "line_number": 326, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 327, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 339, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 339, "usage_type": "name"}, {"api_name": "app.forms.EditProfileForm", "line_number": 341, "usage_type": "call"}, {"api_name": "flask_login.current_user.username", "line_number": 343, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 343, "usage_type": "name"}, {"api_name": "flask_login.current_user.about_me", "line_number": 344, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 344, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 345, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 345, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 345, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 346, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 347, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 347, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 348, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 348, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 349, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 349, "usage_type": "name"}, {"api_name": "flask_login.current_user.about_me", "line_number": 350, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 350, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 351, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 336, "usage_type": "call"}, {"api_name": "app.app", "line_number": 336, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 337, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path", "line_number": 356, "usage_type": "attribute"}, {"api_name": "flask.request.files", "line_number": 364, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 364, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 365, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 365, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 369, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 369, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 372, "usage_type": "call"}, {"api_name": 
"flask_login.current_user.username", "line_number": 374, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 374, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 375, "usage_type": "call"}, {"api_name": "flask_login.current_user.username", "line_number": 375, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 375, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 377, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 385, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 385, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 354, "usage_type": "call"}, {"api_name": "app.app", "line_number": 354, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 393, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 393, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 393, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 404, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 387, "usage_type": "call"}, {"api_name": "app.app", "line_number": 387, "usage_type": "name"}, {"api_name": "flask_login.current_user.username", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 412, "usage_type": "name"}, {"api_name": "app.forms.PostForm", "line_number": 414, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path", "line_number": 425, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 426, "usage_type": "call"}, {"api_name": "os.path", "line_number": 426, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 427, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 428, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 439, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 440, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 440, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 442, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 442, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 448, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 410, "usage_type": "call"}, {"api_name": "app.app", "line_number": 410, "usage_type": "name"}, {"api_name": "app.forms.AvaEdit", "line_number": 452, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 457, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 450, "usage_type": "call"}, {"api_name": "app.app", "line_number": 450, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path", "line_number": 468, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 476, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 476, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 477, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 477, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 484, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 511, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 511, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 460, "usage_type": "call"}, {"api_name": "app.app", "line_number": 460, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 515, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 513, "usage_type": "call"}, {"api_name": "app.app", "line_number": 513, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 604, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 604, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_banned", "line_number": 604, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 606, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 606, "usage_type": "name"}, {"api_name": "flask_login.current_user.last_seen", "line_number": 608, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 608, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 608, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 608, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 609, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 609, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 609, "usage_type": "name"}, {"api_name": "app.app.before_request", "line_number": 602, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 602, "usage_type": "name"}]} +{"seq_id": "615904706", "text": "#!/bin/env python\n\n\"\"\"\nNews module containing a news class that hold a list of Pages\nit updates and displays titles and relevants info from those pages\nrandomly and periodicaly, updating and displaying is executed in\n2 seperate threads\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport random\nimport time\nfrom threading import Thread\n\nfrom requests import ConnectionError\nfrom requests.exceptions import HTTPError, Timeout\nfrom page import (\n\t\tDaa,\n\t\tBeamNG,\n\t\tCosmoteer,\n\t\tMythologic,\n\t\tProjectZomboid,\n\t\tRedditRimWorld,\n\t\tRedditUnixporn,\n\t\tRedditVim,\n\t\tRedditWebdev,\n\t\tHackerNoon,\n\t\tFreecodecamp\n\t\t)\nfrom util import color_bash as cb\n\nclass News(object):\n\t\"\"\" Store a list a Page, update and print in periodically \"\"\"\n\n\tdef __init__(self, pages):\n\t\tdirname = os.path.dirname(os.path.realpath(__file__))\n\t\tself.url_file = os.path.join(dirname, 'news_url')\n\t\tself.pages = pages\n\t\tself.size = len(self.pages)\n\t\tself.index = -1\n\n\tdef __repr__(self):\n\t\treturn '{}({})'.format(self.__class__.__name__, self.pages)\n\n\tdef _get_page_index(self):\n\t\t\"\"\"\n\t\tGet page index randomly. 
the probability of\n\t\teach index is proportional to its title count\n\t\t\"\"\"\n\n\t\trand_list = []\n\t\tfor page_index in range(0, self.size):\n\t\t\trand_list.extend([page_index] * len(self.pages[page_index]))\n\t\treturn random.choice(rand_list)\n\n\tdef _get_index(self):\n\t\t\"\"\"\n\t\tGet current index of pages list to update the webpage\n\t\tcontent to avoid updating all webpages at once\n\t\t\"\"\"\n\n\t\tself.index += 1\n\t\tif self.index > self.size - 1:\n\t\t\tself.index = 0\n\t\treturn self.index\n\n\tdef _is_content_avail(self):\n\t\t\"\"\" return True if there is content in one of the pages \"\"\"\n\n\t\tfor page in self.pages:\n\t\t\tif page.content:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef _get_random_index(self):\n\t\t\"\"\"\n\t\tReturn a tuple of random indexes for Page and Page.content['title'] which does 2 things:\n\t\t\tGet a random page\n\t\t\tGet a random title in that page\n\t\t\"\"\"\n\t\tpage_index = self._get_page_index()\n\n\t\t# TODO: Move this to test.py\n\t\t# Throw error if title list len is zero\n\t\terr_msg = '{}\\'s title selector not available'.format(self.pages[page_index].name)\n\t\tassert 'title' in self.pages[page_index].selector, err_msg\n\n\t\ttry:\n\t\t\ttitle_index = random.randint(0, len(self.pages[page_index].content) - 1)\n\t\texcept ValueError: # self.content is empty list => random.randint(0, -1)\n\t\t\treturn page_index, None\n\n\t\treturn page_index, title_index\n\n\tdef update_all(self):\n\t\t\"\"\" Update all pages in self.pages list once (dont update againt if failed) \"\"\"\n\t\tlogging.info(cb('[update all] starting...', 'magenta'))\n\t\ttry:\n\t\t\tfor index in range(self.size):\n\t\t\t\tlogging.info(cb('[update all] update ', 'magenta') + cb(self.pages[index].name, 'green'))\n\t\t\t\tself.pages[index].update()\n\t\texcept (HTTPError, Timeout, ConnectionError):\n\t\t\tlogging.info(cb('update failed: ', 'red'))\n\t\tlogging.info(cb('[update all] finished', 'green'))\n\n\tdef update_news(self):\n\t\t\"\"\"\n\t\tUpdate news periodically, endless loop,\n\t\tuse in parellel with display_news\n\t\t\"\"\"\n\n\t\tself.update_all()\n\n\t\tindex = self._get_index()\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself.pages[index].update()\n\t\t\texcept (HTTPError, Timeout, ConnectionError):\n\t\t\t\tlogging.info(cb('update failed: ', 'red'))\n\t\t\t\ttime.sleep(2)\n\t\t\telse:\n\t\t\t\tlogging.info(cb('update success', 'green'))\n\t\t\t\ttime.sleep(30)\n\t\t\tfinally:\n\t\t\t\tindex = self._get_index()\n\n\tdef display_news(self):\n\t\t\"\"\"\n\t\tDisplay news periodically, endless loop,\n\t\tuse in parellel with update_news\n\t\t\"\"\"\n\n\t\tpage_index, title_index = self._get_random_index()\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself.pages[page_index].display(title_index)\n\t\t\texcept TypeError: # self.content is empty => title_index = None\n\t\t\t\tlogging.info(cb('display failed', 'red'))\n\t\t\t\ttime.sleep(0)\n\t\t\telse:\n\t\t\t\tlogging.info(cb('display success', 'green'))\n\t\t\t\tself._export_link(self.pages[page_index].get_link(title_index))\n\t\t\t\ttime.sleep(20)\n\t\t\tfinally:\n\t\t\t\tpage_index, title_index = self._get_random_index()\n\n\tdef _export_link(self, link):\n\t\t\"\"\"\n\t\tExport link of current title displayed\n\t\ton polybar to $(pwd)/news_url file\n\t\t\"\"\"\n\n\t\twith open(self.url_file, 'w') as file:\n\t\t\tfile.write(link)\n\n\tdef start(self):\n\t\t\"\"\" Start endless loop of scraping and displaying news \"\"\"\n\n\t\tupdate = Thread(target=lambda: self.update_news())\n\t\tdisplay = 
Thread(target=lambda: self.display_news())\n\n\t\tupdate.start()\n\t\tlogging.info(cb('update.start()', 'blue'))\n\n\t\t# Only display if there is at least one page fetch successfully\n\t\t# because display thread will keep dicing for another page if\n\t\t# the last one is not successful\n\t\twhile not self._is_content_avail():\n\t\t\tlogging.info(cb('content not available', 'red'))\n\t\t\ttime.sleep(3)\n\t\tdisplay.start()\n\t\tlogging.info(cb('display.start()', 'blue'))\n\n\t\tupdate.join()\n\t\tdisplay.join()\n\ndef main():\n\t\"\"\" main function \"\"\"\n\n\tpages = [\n\t\t\tDaa(),\n\t\t\tBeamNG(),\n\t\t\tCosmoteer(),\n\t\t\tMythologic(),\n\t\t\tProjectZomboid(),\n\t\t\tRedditRimWorld(),\n\t\t\tRedditUnixporn(),\n\t\t\tRedditVim(),\n\t\t\tRedditWebdev(),\n\t\t\tHackerNoon(),\n\t\t\tFreecodecamp()\n\t\t\t]\n\n\tnews = News(pages)\n\tnews.start()\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser(description='Show headlines from various websites on polybar')\n\tparser.add_argument('log', nargs='?', help='Logging for debug or not')\n\targ = parser.parse_args()\n\n\tif arg.log != 'debug':\n\t\tmain()\n\telse:\n\t\tpage_list = [\n\t\t\t\tDaa(),\n\t\t\t\tBeamNG(),\n\t\t\t\tCosmoteer(),\n\t\t\t\tMythologic(),\n\t\t\t\tProjectZomboid(),\n\t\t\t\tRedditRimWorld(),\n\t\t\t\tRedditUnixporn(),\n\t\t\t\tRedditVim(),\n\t\t\t\tRedditWebdev(),\n\t\t\t\tHackerNoon(),\n\t\t\t\tFreecodecamp()\n\t\t\t\t]\n\n\t\t# Shut up the request module logger\n\t\tlogging.getLogger(\"requests\").setLevel(logging.WARNING)\n\t\tlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\t\t# Setup logging\n\t\tlogging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG)\n\n\t\tnews = News(page_list)\n\t\tnews.start()\n\t\t# news.update_all()\n\n\t\ttotal_list = [0] * news.size\n\t\tfor i in range(0, 200):\n\t\t\tpi, _ = news._get_random_index()\n\t\t\ttotal_list[pi] += 1\n\t\t\tprint(pi)\n\n\t\tfor i in range(0, news.size):\n\t\t\tprint('\\n' + news.pages[i].name + '\\' title count: ' + str(len(news.pages[i])))\n\t\tprint(str(total_list))\n\n\t\t# print(news)\n\t\t# test_index = 5\n\t\t# news.pages[test_index].update()\n\t\t# news.pages[test_index].display_all()\n\n\t\t# print()\n\t\t# for i in range(0, len(news.pages[test_index].content)):\n\t\t# \tnews.pages[test_index].display(i)\n\t\t# \tprint(news.pages[test_index].get_link(i))\n\n# vim: nofoldenable\n", "sub_path": ".config/polybar/news/news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 6338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "page.content", "line_number": 73, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 99, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 99, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 102, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", 
"line_number": 104, "usage_type": "name"}, {"api_name": "requests.exceptions.Timeout", "line_number": 104, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 104, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 105, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 106, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 106, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 120, "usage_type": "name"}, {"api_name": "requests.exceptions.Timeout", "line_number": 120, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 120, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 121, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 121, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 124, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 141, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 141, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 144, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 144, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 162, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 163, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 166, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 166, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 172, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 175, "usage_type": "call"}, {"api_name": "util.color_bash", "line_number": 175, "usage_type": "call"}, {"api_name": "page.Daa", "line_number": 184, "usage_type": "call"}, {"api_name": "page.BeamNG", "line_number": 185, "usage_type": "call"}, {"api_name": "page.Cosmoteer", "line_number": 186, "usage_type": "call"}, {"api_name": "page.Mythologic", "line_number": 187, "usage_type": "call"}, {"api_name": "page.ProjectZomboid", "line_number": 188, "usage_type": "call"}, {"api_name": "page.RedditRimWorld", "line_number": 189, "usage_type": "call"}, {"api_name": "page.RedditUnixporn", "line_number": 190, "usage_type": "call"}, {"api_name": "page.RedditVim", "line_number": 191, "usage_type": "call"}, {"api_name": "page.RedditWebdev", "line_number": 192, "usage_type": "call"}, {"api_name": "page.HackerNoon", "line_number": 193, "usage_type": "call"}, {"api_name": "page.Freecodecamp", "line_number": 194, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 202, "usage_type": "call"}, {"api_name": "page.Daa", "line_number": 210, "usage_type": "call"}, {"api_name": "page.BeamNG", "line_number": 211, "usage_type": "call"}, {"api_name": "page.Cosmoteer", "line_number": 212, "usage_type": "call"}, {"api_name": "page.Mythologic", "line_number": 213, "usage_type": "call"}, {"api_name": "page.ProjectZomboid", "line_number": 214, 
"usage_type": "call"}, {"api_name": "page.RedditRimWorld", "line_number": 215, "usage_type": "call"}, {"api_name": "page.RedditUnixporn", "line_number": 216, "usage_type": "call"}, {"api_name": "page.RedditVim", "line_number": 217, "usage_type": "call"}, {"api_name": "page.RedditWebdev", "line_number": 218, "usage_type": "call"}, {"api_name": "page.HackerNoon", "line_number": 219, "usage_type": "call"}, {"api_name": "page.Freecodecamp", "line_number": 220, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 224, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 224, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 225, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 225, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 227, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 227, "usage_type": "attribute"}]} +{"seq_id": "562090630", "text": "import cv2\nimport numpy as np\n\nimg = cv2.imread('Mavic Air Fly.jpeg')\ngrayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nsurf = cv2.xfeatures2d.SURF_create(400, upright=True)\nkp, des = surf.detectAndCompute(grayImg, None)\n\nimg2 = cv2.drawKeypoints(img, kp, None, (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\ncv2.imshow('SURF Image', img2)\ncv2.imwrite('surfDetect.jpg', img2)\ncv2.waitKey()\ncv2.destroyAllWindows()\n", "sub_path": "Class 5 Feature Detection and Description/C5 Surf Detection.py", "file_name": "C5 Surf Detection.py", "file_ext": "py", "file_size_in_byte": 423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SURF_create", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.drawKeypoints", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "257742785", "text": "from matplotlib import pyplot as plt\nfrom matplotlib import style\n\nstyle.use( 'dark_background' )\n#style.use( 'ggplot' )\n# The two axis should have the same first dimensional list length\nx = [5,6,7,8]\n\ny = [7,3,8,3]\n\nx2 = [ 6, 10, 2, 7 ]\ny2 = [ 3, 9, 6, 6 ]\n\nplt.plot( x, y, 'g', linewidth=5 )\nplt.plot( x2, y2, 'c', linewidth=10 )\n\nplt.title( 'Epic Chart' )\nplt.ylabel( 'Y Axis' )\nplt.xlabel( 'X Axis' )\n\nplt.show()\n", "sub_path": "Python/MatPlotLibStyles.py", "file_name": "MatPlotLibStyles.py", "file_ext": "py", "file_size_in_byte": 417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "matplotlib.style.use", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 4, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "314380172", "text": "\n# This is Multi MNIST As per Sara-Sabour's paper\nimport random\nimport torch\nimport numpy as np\nfrom torchvision import datasets\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\nrandom.seed(4)\nnp.random.seed(4)\ntorch.manual_seed(4)\nimport torchvision.transforms as transforms\n\n\n\ndef shift_write_multi_mnist(input_dataset, file_prefix, shift, pad, max_shard,\n num_pairs):\n \"\"\"Writes the transformed duplicated data as tfrecords.\n Since the generated dataset is quite large, shards the output files. During\n writing selects the writer for each example randomly to diversify the range\n of labels in each file.\n Pads the data by adding zeros. Shifts all images randomly. For each image\n randomly selects a set of other images with different label as its pair.\n Aggregates the image pair with a maximum pixel value of 255.\n Writes overlayed pairs of shifted images as tf.train.Example in tfrecords\n files.\n Args:\n input_dataset: A list of tuples containing corresponding images and labels.\n file_prefix: String, prefix of the name of the resultant sharded tfrecord\n file.\n shift: Integer, the shift range for images.\n pad: Integer, the number of pixels to be padded.\n max_shard: Integer, maximum number of examples in each shard.\n num_pairs: Integer, number of pairs of images generated for each input\n image.\n \"\"\"\n num_images = len(input_dataset)\n\n # writers, writer_turns = sharded_writers(num_images * num_pairs, max_shard,\n # num_images, file_prefix)\n\n random_shifts = np.random.randint(-shift, shift + 1,\n (num_images, num_pairs + 1, 2))\n \n dataset = [(np.pad(image, pad, 'constant'), label)\n for (image, label) in input_dataset]\n\n for i, (base_image, base_label) in enumerate(dataset):\n \n # Shift each image\n base_shifted = shift_2d(base_image, random_shifts[i, 0, :], shift).astype(\n np.uint8)\n\n # Choose (2*num_pair) images out of num_images. 
\n choices = np.random.choice(num_images, 2 * num_pairs, replace=False)\n chosen_dataset = []\n \n #\n for choice in choices:\n if dataset[choice][1] != base_label:\n chosen_dataset.append(dataset[choice])\n \n for j, (top_image, top_label) in enumerate(chosen_dataset[:num_pairs]):\n top_shifted = shift_2d(top_image, random_shifts[i, j + 1, :],\n shift).astype(np.uint8)\n merged = np.add(base_shifted, top_shifted, dtype=np.int32)\n merged = np.minimum(merged, 255).astype(np.uint8)\n \n\n\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'height': int64_feature(IMAGE_SIZE_PX + 2 * pad),\n 'width': int64_feature(IMAGE_SIZE_PX + 2 * pad),\n 'depth': int64_feature(1),\n 'label_1': int64_feature(base_label),\n 'label_2': int64_feature(top_label),\n 'image_raw_1': bytes_feature(base_shifted.tostring()),\n 'image_raw_2': bytes_feature(top_shifted.tostring()),\n 'merged_raw': bytes_feature(merged.tostring()),\n }))\n writers[writer_turns[i, j]].write(example.SerializeToString())\n\n for writer in writers:\n writer.close()", "sub_path": "Custom_datasets/DataMultiMNIST.py", "file_name": "DataMultiMNIST.py", "file_ext": "py", "file_size_in_byte": 3323, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.pad", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.add", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "244840289", "text": "# Copyright Amazon.com Inc. or its affiliates. 
All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nBy default, the AWS KMS keyring uses the default configurations\nfor all AWS KMS clients and uses the default discoverable credentials.\nIf you need to change this configuration,\nyou can configure the client supplier.\n\nThis example shows how to use custom-configured clients with the AWS KMS keyring.\n\nhttps://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/choose-keyring.html#use-kms-keyring\n\nFor an example of how to use the AWS KMS keyring with CMKs in multiple regions,\nsee the ``keyring/aws_kms/multiple_regions`` example.\n\nFor another example of how to use the AWS KMS keyring with custom client configuration,\nsee the ``keyring/aws_kms/custom_client_supplier`` example.\n\nFor examples of how to use the AWS KMS keyring in discovery mode on decrypt,\nsee the ``keyring/aws_kms/discovery_decrypt``,\n``keyring/aws_kms/discovery_decrypt_in_region_only``,\nand ``keyring/aws_kms/discovery_decrypt_with_preferred_region`` examples.\n\"\"\"\nfrom botocore.config import Config\nfrom botocore.session import Session\n\nimport aws_encryption_sdk\nfrom aws_encryption_sdk.identifiers import USER_AGENT_SUFFIX\nfrom aws_encryption_sdk.keyrings.aws_kms import AwsKmsKeyring\nfrom aws_encryption_sdk.keyrings.aws_kms.client_suppliers import DefaultClientSupplier\n\n\ndef run(aws_kms_cmk, source_plaintext):\n # type: (str, bytes) -> None\n \"\"\"Demonstrate an encrypt/decrypt cycle using an AWS KMS keyring with custom AWS KMS client configuration.\n\n :param str aws_kms_cmk: The ARN of an AWS KMS CMK that protects data keys\n :param bytes source_plaintext: Plaintext to encrypt\n \"\"\"\n # Prepare your encryption context.\n # Remember that your encryption context is NOT SECRET.\n # https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/concepts.html#encryption-context\n encryption_context = {\n \"encryption\": \"context\",\n \"is not\": \"secret\",\n \"but adds\": \"useful metadata\",\n \"that can help you\": \"be confident that\",\n \"the data you are handling\": \"is what you think it is\",\n }\n\n # Prepare your custom configuration values.\n #\n # Set your custom connection timeout value.\n # https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html\n custom_client_config = Config(connect_timeout=10.0, user_agent_extra=USER_AGENT_SUFFIX)\n # For this example we will just use the default botocore session configuration\n # but if you need to, you can set custom credentials in the botocore session.\n custom_session = Session()\n\n # Use your custom configuration values to configure your client supplier.\n client_supplier = DefaultClientSupplier(botocore_session=custom_session, client_config=custom_client_config)\n\n # Create the keyring that determines how your data keys are protected,\n # providing the client supplier that you created.\n keyring = AwsKmsKeyring(generator_key_id=aws_kms_cmk, client_supplier=client_supplier)\n\n # Encrypt your plaintext data.\n ciphertext, _encrypt_header = aws_encryption_sdk.encrypt(\n source=source_plaintext, encryption_context=encryption_context, keyring=keyring\n )\n\n # Demonstrate that the ciphertext and plaintext are different.\n assert ciphertext != source_plaintext\n\n # Decrypt your encrypted data using the same keyring you used on encrypt.\n #\n # You do not need to specify the encryption context on decrypt\n # because the header of the encrypted message includes the encryption context.\n decrypted, decrypt_header = 
aws_encryption_sdk.decrypt(source=ciphertext, keyring=keyring)\n\n # Demonstrate that the decrypted plaintext is identical to the original plaintext.\n assert decrypted == source_plaintext\n\n # Verify that the encryption context used in the decrypt operation includes\n # the encryption context that you specified when encrypting.\n # The AWS Encryption SDK can add pairs, so don't require an exact match.\n #\n # In production, always use a meaningful encryption context.\n assert set(encryption_context.items()) <= set(decrypt_header.encryption_context.items())\n", "sub_path": "examples/src/keyring/aws_kms/custom_kms_client_config.py", "file_name": "custom_kms_client_config.py", "file_ext": "py", "file_size_in_byte": 4180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "botocore.config.Config", "line_number": 55, "usage_type": "call"}, {"api_name": "aws_encryption_sdk.identifiers.USER_AGENT_SUFFIX", "line_number": 55, "usage_type": "name"}, {"api_name": "botocore.session.Session", "line_number": 58, "usage_type": "call"}, {"api_name": "aws_encryption_sdk.keyrings.aws_kms.client_suppliers.DefaultClientSupplier", "line_number": 61, "usage_type": "call"}, {"api_name": "aws_encryption_sdk.keyrings.aws_kms.AwsKmsKeyring", "line_number": 65, "usage_type": "call"}, {"api_name": "aws_encryption_sdk.encrypt", "line_number": 68, "usage_type": "call"}, {"api_name": "aws_encryption_sdk.decrypt", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "236267033", "text": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
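The AWS KMS example above ends by asserting that the encryption context supplied on encrypt is contained in the context returned in the decrypted message header, because the SDK may add extra pairs. The same subset check in isolation, with made-up dictionaries standing in for the real contexts:

# Hypothetical stand-ins for the context passed to encrypt() and the
# (possibly enlarged) context read back from the message header.
supplied = {"purpose": "example", "stage": "test"}
returned = {"purpose": "example", "stage": "test", "aws-crypto-public-key": "..."}

# One dict is "contained" in another when every key/value pair appears in both.
assert set(supplied.items()) <= set(returned.items())
print("supplied context is contained in the returned context")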
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport json\nimport os.path\nimport re\nimport signal\nimport time\n\nfrom ducktape.services.service import Service\nfrom ducktape.utils.util import wait_until\nfrom ducktape.cluster.remoteaccount import RemoteCommandError\n\nfrom config import KafkaConfig\nfrom kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin\nfrom kafkatest.services.kafka import config_property\nfrom kafkatest.services.monitor.jmx import JmxMixin\nfrom kafkatest.services.security.minikdc import MiniKdc\nfrom kafkatest.services.security.security_config import SecurityConfig\nfrom kafkatest.version import DEV_BRANCH\n\nPort = collections.namedtuple('Port', ['name', 'number', 'open'])\n\n\nclass KafkaService(KafkaPathResolverMixin, JmxMixin, Service):\n PERSISTENT_ROOT = \"/mnt/kafka\"\n STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, \"server-start-stdout-stderr.log\")\n LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, \"kafka-log4j.properties\")\n # Logs such as controller.log, server.log, etc all go here\n OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, \"kafka-operational-logs\")\n OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, \"info\")\n OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, \"debug\")\n # Kafka log segments etc go here\n DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, \"kafka-data-logs\")\n DATA_LOG_DIR_1 = \"%s-1\" % (DATA_LOG_DIR_PREFIX)\n DATA_LOG_DIR_2 = \"%s-2\" % (DATA_LOG_DIR_PREFIX)\n CONFIG_FILE = os.path.join(PERSISTENT_ROOT, \"kafka.properties\")\n # Kafka Authorizer\n SIMPLE_AUTHORIZER = \"kafka.security.auth.SimpleAclAuthorizer\"\n\n logs = {\n \"kafka_server_start_stdout_stderr\": {\n \"path\": STDOUT_STDERR_CAPTURE,\n \"collect_default\": True},\n \"kafka_operational_logs_info\": {\n \"path\": OPERATIONAL_LOG_INFO_DIR,\n \"collect_default\": True},\n \"kafka_operational_logs_debug\": {\n \"path\": OPERATIONAL_LOG_DEBUG_DIR,\n \"collect_default\": False},\n \"kafka_data_1\": {\n \"path\": DATA_LOG_DIR_1,\n \"collect_default\": False},\n \"kafka_data_2\": {\n \"path\": DATA_LOG_DIR_2,\n \"collect_default\": False}\n }\n\n def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,\n client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,\n authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,\n jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=[], zk_chroot=None):\n \"\"\"\n :type context\n :type zk: ZookeeperService\n :type topics: dict\n \"\"\"\n Service.__init__(self, context, num_nodes)\n JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes or [],\n root=KafkaService.PERSISTENT_ROOT)\n\n self.zk = zk\n\n self.security_protocol = security_protocol\n self.interbroker_security_protocol = interbroker_security_protocol\n self.client_sasl_mechanism = client_sasl_mechanism\n self.interbroker_sasl_mechanism = interbroker_sasl_mechanism\n self.topics = topics\n self.minikdc = 
None\n self.authorizer_class_name = authorizer_class_name\n self.zk_set_acl = False\n self.server_prop_overides = server_prop_overides\n self.log_level = \"DEBUG\"\n self.zk_chroot = zk_chroot\n\n #\n # In a heavily loaded and not very fast machine, it is\n # sometimes necessary to give more time for the zk client\n # to have its session established, especially if the client\n # is authenticating and waiting for the SaslAuthenticated\n # in addition to the SyncConnected event.\n #\n # The defaut value for zookeeper.connect.timeout.ms is\n # 2 seconds and here we increase it to 5 seconds, but\n # it can be overriden by setting the corresponding parameter\n # for this constructor.\n self.zk_connect_timeout = zk_connect_timeout\n\n # Also allow the session timeout to be provided explicitly,\n # primarily so that test cases can depend on it when waiting\n # e.g. brokers to deregister after a hard kill.\n self.zk_session_timeout = zk_session_timeout\n\n self.port_mappings = {\n 'PLAINTEXT': Port('PLAINTEXT', 9092, False),\n 'SSL': Port('SSL', 9093, False),\n 'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),\n 'SASL_SSL': Port('SASL_SSL', 9095, False)\n }\n\n for node in self.nodes:\n node.version = version\n node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})\n\n\n def set_version(self, version):\n for node in self.nodes:\n node.version = version\n\n @property\n def security_config(self):\n config = SecurityConfig(self.context, self.security_protocol, self.interbroker_security_protocol,\n zk_sasl=self.zk.zk_sasl,\n client_sasl_mechanism=self.client_sasl_mechanism, interbroker_sasl_mechanism=self.interbroker_sasl_mechanism)\n for protocol in self.port_mappings:\n port = self.port_mappings[protocol]\n if port.open:\n config.enable_security_protocol(port.name)\n return config\n\n def open_port(self, protocol):\n self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=True)\n\n def close_port(self, protocol):\n self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=False)\n\n def start_minikdc(self, add_principals=\"\"):\n if self.security_config.has_sasl:\n if self.minikdc is None:\n self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)\n self.minikdc.start()\n else:\n self.minikdc = None\n\n def alive(self, node):\n return len(self.pids(node)) > 0\n\n def start(self, add_principals=\"\"):\n self.open_port(self.security_protocol)\n self.open_port(self.interbroker_security_protocol)\n\n self.start_minikdc(add_principals)\n Service.start(self)\n\n # Create topics if necessary\n if self.topics is not None:\n for topic, topic_cfg in self.topics.items():\n if topic_cfg is None:\n topic_cfg = {}\n\n topic_cfg[\"topic\"] = topic\n self.create_topic(topic_cfg)\n\n def set_protocol_and_port(self, node):\n listeners = []\n advertised_listeners = []\n\n for protocol in self.port_mappings:\n port = self.port_mappings[protocol]\n if port.open:\n listeners.append(port.name + \"://:\" + str(port.number))\n advertised_listeners.append(port.name + \"://\" + node.account.hostname + \":\" + str(port.number))\n\n self.listeners = ','.join(listeners)\n self.advertised_listeners = ','.join(advertised_listeners)\n\n def prop_file(self, node):\n cfg = KafkaConfig(**node.config)\n cfg[config_property.ADVERTISED_HOSTNAME] = node.account.hostname\n cfg[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()\n\n for prop in self.server_prop_overides:\n cfg[prop[0]] = prop[1]\n\n self.set_protocol_and_port(node)\n\n # TODO - 
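KafkaService above tracks its listener ports with a namedtuple and toggles the open flag via _replace, since namedtuples are immutable. A small sketch of that pattern on its own:

import collections

Port = collections.namedtuple('Port', ['name', 'number', 'open'])

port_mappings = {'PLAINTEXT': Port('PLAINTEXT', 9092, False),
                 'SSL': Port('SSL', 9093, False)}

# _replace returns a new tuple with the changed field; the original is untouched.
port_mappings['PLAINTEXT'] = port_mappings['PLAINTEXT']._replace(open=True)

open_listeners = [p.name for p in port_mappings.values() if p.open]
print(open_listeners)  # ['PLAINTEXT']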
clean up duplicate configuration logic\n prop_file = cfg.render()\n prop_file += self.render('kafka.properties', node=node, broker_id=self.idx(node),\n security_config=self.security_config, num_nodes=self.num_nodes)\n return prop_file\n\n def start_cmd(self, node):\n cmd = \"export JMX_PORT=%d; \" % self.jmx_port\n cmd += \"export KAFKA_LOG4J_OPTS=\\\"-Dlog4j.configuration=file:%s\\\"; \" % self.LOG4J_CONFIG\n cmd += \"export KAFKA_OPTS=%s; \" % self.security_config.kafka_opts\n cmd += \"%s %s 1>> %s 2>> %s &\" % \\\n (self.path.script(\"kafka-server-start.sh\", node),\n KafkaService.CONFIG_FILE,\n KafkaService.STDOUT_STDERR_CAPTURE,\n KafkaService.STDOUT_STDERR_CAPTURE)\n return cmd\n\n def start_node(self, node):\n node.account.mkdirs(KafkaService.PERSISTENT_ROOT)\n prop_file = self.prop_file(node)\n self.logger.info(\"kafka.properties:\")\n self.logger.info(prop_file)\n node.account.create_file(KafkaService.CONFIG_FILE, prop_file)\n node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))\n\n self.security_config.setup_node(node)\n self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)\n\n cmd = self.start_cmd(node)\n self.logger.debug(\"Attempting to start KafkaService on %s with command: %s\" % (str(node.account), cmd))\n with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:\n node.account.ssh(cmd)\n # Kafka 1.0.0 and higher don't have a space between \"Kafka\" and \"Server\"\n monitor.wait_until(\"Kafka\\s*Server.*started\", timeout_sec=30, backoff_sec=.25, err_msg=\"Kafka server didn't finish startup\")\n\n # Credentials for inter-broker communication are created before starting Kafka.\n # Client credentials are created after starting Kafka so that both loading of\n # existing credentials from ZK and dynamic update of credentials in Kafka are tested.\n self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)\n\n self.start_jmx_tool(self.idx(node), node)\n if len(self.pids(node)) == 0:\n raise Exception(\"No process ids recorded on node %s\" % node.account.hostname)\n\n def pids(self, node):\n \"\"\"Return process ids associated with running processes on the given node.\"\"\"\n try:\n cmd = \"jcmd | grep -e %s | awk '{print $1}'\" % self.java_class_name()\n pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]\n return pid_arr\n except (RemoteCommandError, ValueError) as e:\n return []\n\n def signal_node(self, node, sig=signal.SIGTERM):\n pids = self.pids(node)\n for pid in pids:\n node.account.signal(pid, sig)\n\n def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):\n leader = self.leader(topic, partition)\n self.signal_node(leader, sig)\n\n def stop_node(self, node, clean_shutdown=True):\n pids = self.pids(node)\n sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL\n\n for pid in pids:\n node.account.signal(pid, sig, allow_fail=False)\n wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=60, err_msg=\"Kafka node failed to stop\")\n\n def clean_node(self, node):\n JmxMixin.clean_node(self, node)\n self.security_config.clean_node(node)\n node.account.kill_java_processes(self.java_class_name(),\n clean_shutdown=False, allow_fail=True)\n node.account.ssh(\"sudo rm -rf -- %s\" % KafkaService.PERSISTENT_ROOT, allow_fail=False)\n\n def create_topic(self, topic_cfg, node=None):\n \"\"\"Run the admin tool create topic command.\n Specifying node is optional, and may be 
done if for different kafka nodes have different versions,\n and we care where command gets run.\n\n If the node is not specified, run the command from self.nodes[0]\n \"\"\"\n if node is None:\n node = self.nodes[0]\n self.logger.info(\"Creating topic %s with settings %s\",\n topic_cfg[\"topic\"], topic_cfg)\n kafka_topic_script = self.path.script(\"kafka-topics.sh\", node)\n\n cmd = kafka_topic_script + \" \"\n cmd += \"--zookeeper %(zk_connect)s --create --topic %(topic)s \" % {\n 'zk_connect': self.zk_connect_setting(),\n 'topic': topic_cfg.get(\"topic\"),\n }\n if 'replica-assignment' in topic_cfg:\n cmd += \" --replica-assignment %(replica-assignment)s\" % {\n 'replica-assignment': topic_cfg.get('replica-assignment')\n }\n else:\n cmd += \" --partitions %(partitions)d --replication-factor %(replication-factor)d\" % {\n 'partitions': topic_cfg.get('partitions', 1),\n 'replication-factor': topic_cfg.get('replication-factor', 1)\n }\n\n if \"configs\" in topic_cfg.keys() and topic_cfg[\"configs\"] is not None:\n for config_name, config_value in topic_cfg[\"configs\"].items():\n cmd += \" --config %s=%s\" % (config_name, str(config_value))\n\n self.logger.info(\"Running topic creation command...\\n%s\" % cmd)\n node.account.ssh(cmd)\n\n time.sleep(1)\n self.logger.info(\"Checking to see if topic was properly created...\\n%s\" % cmd)\n for line in self.describe_topic(topic_cfg[\"topic\"]).split(\"\\n\"):\n self.logger.info(line)\n\n def describe_topic(self, topic, node=None):\n if node is None:\n node = self.nodes[0]\n cmd = \"%s --zookeeper %s --topic %s --describe\" % \\\n (self.path.script(\"kafka-topics.sh\", node), self.zk_connect_setting(), topic)\n output = \"\"\n for line in node.account.ssh_capture(cmd):\n output += line\n return output\n\n def list_topics(self, topic, node=None):\n if node is None:\n node = self.nodes[0]\n cmd = \"%s --zookeeper %s --list\" % \\\n (self.path.script(\"kafka-topics.sh\", node), self.zk_connect_setting())\n for line in node.account.ssh_capture(cmd):\n if not line.startswith(\"SLF4J\"):\n yield line.rstrip()\n\n def alter_message_format(self, topic, msg_format_version, node=None):\n if node is None:\n node = self.nodes[0]\n self.logger.info(\"Altering message format version for topic %s with format %s\", topic, msg_format_version)\n cmd = \"%s --zookeeper %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s\" % \\\n (self.path.script(\"kafka-configs.sh\", node), self.zk_connect_setting(), topic, msg_format_version)\n self.logger.info(\"Running alter message format command...\\n%s\" % cmd)\n node.account.ssh(cmd)\n\n def parse_describe_topic(self, topic_description):\n \"\"\"Parse output of kafka-topics.sh --describe (or describe_topic() method above), which is a string of form\n PartitionCount:2\\tReplicationFactor:2\\tConfigs:\n Topic: test_topic\\ttPartition: 0\\tLeader: 3\\tReplicas: 3,1\\tIsr: 3,1\n Topic: test_topic\\tPartition: 1\\tLeader: 1\\tReplicas: 1,2\\tIsr: 1,2\n into a dictionary structure appropriate for use with reassign-partitions tool:\n {\n \"partitions\": [\n {\"topic\": \"test_topic\", \"partition\": 0, \"replicas\": [3, 1]},\n {\"topic\": \"test_topic\", \"partition\": 1, \"replicas\": [1, 2]}\n ]\n }\n \"\"\"\n lines = map(lambda x: x.strip(), topic_description.split(\"\\n\"))\n partitions = []\n for line in lines:\n m = re.match(\".*Leader:.*\", line)\n if m is None:\n continue\n\n fields = line.split(\"\\t\")\n # [\"Partition: 4\", \"Leader: 0\"] -> [\"4\", \"0\"]\n fields = map(lambda x: 
x.split(\" \")[1], fields)\n partitions.append(\n {\"topic\": fields[0],\n \"partition\": int(fields[1]),\n \"replicas\": map(int, fields[3].split(','))})\n return {\"partitions\": partitions}\n\n def verify_reassign_partitions(self, reassignment, node=None):\n \"\"\"Run the reassign partitions admin tool in \"verify\" mode\n \"\"\"\n if node is None:\n node = self.nodes[0]\n\n json_file = \"/tmp/%s_reassign.json\" % str(time.time())\n\n # reassignment to json\n json_str = json.dumps(reassignment)\n json_str = json.dumps(json_str)\n\n # create command\n cmd = \"echo %s > %s && \" % (json_str, json_file)\n cmd += \"%s \" % self.path.script(\"kafka-reassign-partitions.sh\", node)\n cmd += \"--zookeeper %s \" % self.zk_connect_setting()\n cmd += \"--reassignment-json-file %s \" % json_file\n cmd += \"--verify \"\n cmd += \"&& sleep 1 && rm -f %s\" % json_file\n\n # send command\n self.logger.info(\"Verifying parition reassignment...\")\n self.logger.debug(cmd)\n output = \"\"\n for line in node.account.ssh_capture(cmd):\n output += line\n\n self.logger.debug(output)\n\n if re.match(\".*Reassignment of partition.*failed.*\",\n output.replace('\\n', '')) is not None:\n return False\n\n if re.match(\".*is still in progress.*\",\n output.replace('\\n', '')) is not None:\n return False\n\n return True\n\n def execute_reassign_partitions(self, reassignment, node=None,\n throttle=None):\n \"\"\"Run the reassign partitions admin tool in \"verify\" mode\n \"\"\"\n if node is None:\n node = self.nodes[0]\n json_file = \"/tmp/%s_reassign.json\" % str(time.time())\n\n # reassignment to json\n json_str = json.dumps(reassignment)\n json_str = json.dumps(json_str)\n\n # create command\n cmd = \"echo %s > %s && \" % (json_str, json_file)\n cmd += \"%s \" % self.path.script( \"kafka-reassign-partitions.sh\", node)\n cmd += \"--zookeeper %s \" % self.zk_connect_setting()\n cmd += \"--reassignment-json-file %s \" % json_file\n cmd += \"--execute\"\n if throttle is not None:\n cmd += \" --throttle %d\" % throttle\n cmd += \" && sleep 1 && rm -f %s\" % json_file\n\n # send command\n self.logger.info(\"Executing parition reassignment...\")\n self.logger.debug(cmd)\n output = \"\"\n for line in node.account.ssh_capture(cmd):\n output += line\n\n self.logger.debug(\"Verify partition reassignment:\")\n self.logger.debug(output)\n\n def search_data_files(self, topic, messages):\n \"\"\"Check if a set of messages made it into the Kakfa data files. Note that\n this method takes no account of replication. It simply looks for the\n payload in all the partition files of the specified topic. 'messages' should be\n an array of numbers. 
The list of missing messages is returned.\n \"\"\"\n payload_match = \"payload: \" + \"$|payload: \".join(str(x) for x in messages) + \"$\"\n found = set([])\n self.logger.debug(\"number of unique missing messages we will search for: %d\",\n len(messages))\n for node in self.nodes:\n # Grab all .log files in directories prefixed with this topic\n files = node.account.ssh_capture(\"find %s* -regex '.*/%s-.*/[^/]*.log'\" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))\n\n # Check each data file to see if it contains the messages we want\n for log in files:\n cmd = \"%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \\\"%s\\\"\" % \\\n (self.path.script(\"kafka-run-class.sh\", node), log.strip(), payload_match)\n\n for line in node.account.ssh_capture(cmd, allow_fail=True):\n for val in messages:\n if line.strip().endswith(\"payload: \"+str(val)):\n self.logger.debug(\"Found %s in data-file [%s] in line: [%s]\" % (val, log.strip(), line.strip()))\n found.add(val)\n\n self.logger.debug(\"Number of unique messages found in the log: %d\",\n len(found))\n missing = list(set(messages) - found)\n\n if len(missing) > 0:\n self.logger.warn(\"The following values were not found in the data files: \" + str(missing))\n\n return missing\n\n def restart_node(self, node, clean_shutdown=True):\n \"\"\"Restart the given node.\"\"\"\n self.stop_node(node, clean_shutdown)\n self.start_node(node)\n\n def isr_idx_list(self, topic, partition=0):\n \"\"\" Get in-sync replica list the given topic and partition.\n \"\"\"\n self.logger.debug(\"Querying zookeeper to find in-sync replicas for topic %s and partition %d\" % (topic, partition))\n zk_path = \"/brokers/topics/%s/partitions/%d/state\" % (topic, partition)\n partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)\n\n if partition_state is None:\n raise Exception(\"Error finding partition state for topic %s and partition %d.\" % (topic, partition))\n\n partition_state = json.loads(partition_state)\n self.logger.info(partition_state)\n\n isr_idx_list = partition_state[\"isr\"]\n self.logger.info(\"Isr for topic %s and partition %d is now: %s\" % (topic, partition, isr_idx_list))\n return isr_idx_list\n\n def replicas(self, topic, partition=0):\n \"\"\" Get the assigned replicas for the given topic and partition.\n \"\"\"\n self.logger.debug(\"Querying zookeeper to find assigned replicas for topic %s and partition %d\" % (topic, partition))\n zk_path = \"/brokers/topics/%s\" % (topic)\n assignemnt = self.zk.query(zk_path, chroot=self.zk_chroot)\n\n if assignemnt is None:\n raise Exception(\"Error finding partition state for topic %s and partition %d.\" % (topic, partition))\n\n assignemnt = json.loads(assignemnt)\n self.logger.info(assignemnt)\n\n replicas = assignemnt[\"partitions\"][str(partition)]\n\n self.logger.info(\"Assigned replicas for topic %s and partition %d is now: %s\" % (topic, partition, replicas))\n return [self.get_node(replica) for replica in replicas]\n\n def leader(self, topic, partition=0):\n \"\"\" Get the leader replica for the given topic and partition.\n \"\"\"\n self.logger.debug(\"Querying zookeeper to find leader replica for topic %s and partition %d\" % (topic, partition))\n zk_path = \"/brokers/topics/%s/partitions/%d/state\" % (topic, partition)\n partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)\n\n if partition_state is None:\n raise Exception(\"Error finding partition state for topic %s and partition %d.\" % (topic, partition))\n\n partition_state = json.loads(partition_state)\n 
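isr_idx_list, replicas, and leader above all query broker metadata znodes and pull fields out of the returned JSON payload. A worked example of that parsing step with a hand-written payload in the same shape as /brokers/topics/<topic>/partitions/<n>/state (the values are made up):

import json

# Made-up znode payload in the usual partition-state shape.
raw = '{"controller_epoch": 5, "leader": 1, "version": 1, "leader_epoch": 2, "isr": [1, 3]}'

partition_state = json.loads(raw)
leader_idx = int(partition_state["leader"])
isr_idx_list = partition_state["isr"]
print(leader_idx, isr_idx_list)  # 1 [1, 3]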
self.logger.info(partition_state)\n\n leader_idx = int(partition_state[\"leader\"])\n self.logger.info(\"Leader for topic %s and partition %d is now: %d\" % (topic, partition, leader_idx))\n return self.get_node(leader_idx)\n\n def cluster_id(self):\n \"\"\" Get the current cluster id\n \"\"\"\n self.logger.debug(\"Querying ZooKeeper to retrieve cluster id\")\n cluster = self.zk.query(\"/cluster/id\", chroot=self.zk_chroot)\n\n try:\n return json.loads(cluster)['id'] if cluster else None\n except:\n self.logger.debug(\"Data in /cluster/id znode could not be parsed. Data = %s\" % cluster)\n raise\n\n def list_consumer_groups(self, node=None, new_consumer=True, command_config=None):\n \"\"\" Get list of consumer groups.\n \"\"\"\n if node is None:\n node = self.nodes[0]\n consumer_group_script = self.path.script(\"kafka-consumer-groups.sh\", node)\n\n if command_config is None:\n command_config = \"\"\n else:\n command_config = \"--command-config \" + command_config\n\n if new_consumer:\n cmd = \"%s --new-consumer --bootstrap-server %s %s --list\" % \\\n (consumer_group_script,\n self.bootstrap_servers(self.security_protocol),\n command_config)\n else:\n cmd = \"%s --zookeeper %s %s --list\" % (consumer_group_script, self.zk_connect_setting(), command_config)\n output = \"\"\n self.logger.debug(cmd)\n for line in node.account.ssh_capture(cmd):\n if not line.startswith(\"SLF4J\"):\n output += line\n self.logger.debug(output)\n return output\n\n def describe_consumer_group(self, group, node=None, new_consumer=True, command_config=None):\n \"\"\" Describe a consumer group.\n \"\"\"\n if node is None:\n node = self.nodes[0]\n consumer_group_script = self.path.script(\"kafka-consumer-groups.sh\", node)\n\n if command_config is None:\n command_config = \"\"\n else:\n command_config = \"--command-config \" + command_config\n\n if new_consumer:\n cmd = \"%s --new-consumer --bootstrap-server %s %s --group %s --describe\" % \\\n (consumer_group_script, self.bootstrap_servers(self.security_protocol), command_config, group)\n else:\n cmd = \"%s --zookeeper %s %s --group %s --describe\" % \\\n (consumer_group_script, self.zk_connect_setting(), command_config, group)\n output = \"\"\n self.logger.debug(cmd)\n for line in node.account.ssh_capture(cmd):\n if not (line.startswith(\"SLF4J\") or line.startswith(\"TOPIC\") or line.startswith(\"Could not fetch offset\")):\n output += line\n self.logger.debug(output)\n return output\n\n def zk_connect_setting(self):\n return self.zk.connect_setting(self.zk_chroot)\n\n def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=[]):\n \"\"\"Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...\n\n This is the format expected by many config files.\n \"\"\"\n port_mapping = self.port_mappings[protocol]\n self.logger.info(\"Bootstrap client port is: \" + str(port_mapping.number))\n\n if validate and not port_mapping.open:\n raise ValueError(\"We are retrieving bootstrap servers for the port: %s which is not currently open. 
- \" % str(port_mapping))\n\n return ','.join([node.account.hostname + \":\" + str(port_mapping.number) for node in self.nodes if node not in offline_nodes])\n\n def controller(self):\n \"\"\" Get the controller node\n \"\"\"\n self.logger.debug(\"Querying zookeeper to find controller broker\")\n controller_info = self.zk.query(\"/controller\", chroot=self.zk_chroot)\n\n if controller_info is None:\n raise Exception(\"Error finding controller info\")\n\n controller_info = json.loads(controller_info)\n self.logger.debug(controller_info)\n\n controller_idx = int(controller_info[\"brokerid\"])\n self.logger.info(\"Controller's ID: %d\" % (controller_idx))\n return self.get_node(controller_idx)\n\n def is_registered(self, node):\n \"\"\"\n Check whether a broker is registered in Zookeeper\n \"\"\"\n self.logger.debug(\"Querying zookeeper to see if broker %s is registered\", node)\n broker_info = self.zk.query(\"/brokers/ids/%s\" % self.idx(node), chroot=self.zk_chroot)\n self.logger.debug(\"Broker info: %s\", broker_info)\n return broker_info is not None\n\n def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):\n node = self.nodes[0]\n\n cmd = self.path.script(\"kafka-run-class.sh\", node)\n cmd += \" kafka.tools.GetOffsetShell\"\n cmd += \" --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s\" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)\n\n if partitions:\n cmd += ' --partitions %s' % partitions\n\n cmd += \" 2>> %s/get_offset_shell.log\" % KafkaService.PERSISTENT_ROOT\n cmd += \" | tee -a %s/get_offset_shell.log &\" % KafkaService.PERSISTENT_ROOT\n output = \"\"\n self.logger.debug(cmd)\n for line in node.account.ssh_capture(cmd):\n output += line\n self.logger.debug(output)\n return output\n\n def java_class_name(self):\n return \"kafka.Kafka\"\n", "sub_path": "tests/kafkatest/services/kafka/kafka.py", "file_name": "kafka.py", "file_ext": "py", "file_size_in_byte": 28960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "collections.namedtuple", "line_number": 35, "usage_type": "call"}, {"api_name": "kafkatest.directory_layout.kafka_path.KafkaPathResolverMixin", "line_number": 38, "usage_type": "name"}, {"api_name": "kafkatest.services.monitor.jmx.JmxMixin", "line_number": 38, "usage_type": "name"}, {"api_name": "ducktape.services.service.Service", "line_number": 38, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 45, 
"usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 50, "usage_type": "name"}, {"api_name": "kafkatest.services.security.security_config.SecurityConfig.PLAINTEXT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "kafkatest.services.security.security_config.SecurityConfig", "line_number": 72, "usage_type": "name"}, {"api_name": "kafkatest.services.security.security_config.SecurityConfig.SASL_MECHANISM_GSSAPI", "line_number": 73, "usage_type": "attribute"}, {"api_name": "kafkatest.services.security.security_config.SecurityConfig", "line_number": 73, "usage_type": "name"}, {"api_name": "kafkatest.version.DEV_BRANCH", "line_number": 74, "usage_type": "name"}, {"api_name": "ducktape.services.service.Service.__init__", "line_number": 81, "usage_type": "call"}, {"api_name": "ducktape.services.service.Service", "line_number": 81, "usage_type": "name"}, {"api_name": "kafkatest.services.monitor.jmx.JmxMixin.__init__", "line_number": 82, "usage_type": "call"}, {"api_name": "kafkatest.services.monitor.jmx.JmxMixin", "line_number": 82, "usage_type": "name"}, {"api_name": "config.KafkaConfig", "line_number": 126, "usage_type": "call"}, {"api_name": "kafkatest.services.kafka.config_property.BROKER_ID", "line_number": 126, "usage_type": "attribute"}, {"api_name": "kafkatest.services.kafka.config_property", "line_number": 126, "usage_type": "name"}, {"api_name": "kafkatest.services.security.security_config.SecurityConfig", "line_number": 135, "usage_type": "call"}, {"api_name": "config.enable_security_protocol", "line_number": 141, "usage_type": "call"}, {"api_name": "kafkatest.services.security.minikdc.MiniKdc", "line_number": 153, "usage_type": "call"}, {"api_name": "ducktape.services.service.Service.start", "line_number": 166, "usage_type": "call"}, {"api_name": "ducktape.services.service.Service", "line_number": 166, "usage_type": "name"}, {"api_name": "config.KafkaConfig", "line_number": 191, "usage_type": "call"}, {"api_name": "kafkatest.services.kafka.config_property.ADVERTISED_HOSTNAME", "line_number": 192, "usage_type": "attribute"}, {"api_name": "kafkatest.services.kafka.config_property", "line_number": 192, "usage_type": "name"}, {"api_name": "kafkatest.services.kafka.config_property.ZOOKEEPER_CONNECT", "line_number": 193, "usage_type": "attribute"}, {"api_name": "kafkatest.services.kafka.config_property", "line_number": 193, "usage_type": "name"}, {"api_name": "ducktape.cluster.remoteaccount.RemoteCommandError", "line_number": 250, "usage_type": "name"}, {"api_name": "signal.SIGTERM", "line_number": 253, "usage_type": "attribute"}, {"api_name": "signal.SIGTERM", "line_number": 258, "usage_type": "attribute"}, {"api_name": "signal.SIGTERM", "line_number": 264, "usage_type": "attribute"}, {"api_name": "signal.SIGKILL", "line_number": 264, "usage_type": "attribute"}, {"api_name": "ducktape.utils.util.wait_until", "line_number": 268, "usage_type": "call"}, {"api_name": "kafkatest.services.monitor.jmx.JmxMixin.clean_node", "line_number": 271, "usage_type": "call"}, {"api_name": "kafkatest.services.monitor.jmx.JmxMixin", "line_number": 271, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 312, 
"usage_type": "call"}, {"api_name": "re.match", "line_number": 361, "usage_type": "call"}, {"api_name": "time.time", "line_number": 380, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 383, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 384, "usage_type": "call"}, {"api_name": "re.match", "line_number": 403, "usage_type": "call"}, {"api_name": "re.match", "line_number": 407, "usage_type": "call"}, {"api_name": "time.time", "line_number": 419, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 422, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 423, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 494, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 511, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 529, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 543, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 626, "usage_type": "call"}]} +{"seq_id": "233203490", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^intro/study_purpose/$', views.intro_study_purpose, name='intro_study_purpose'),\n\turl(r'^intro/study_goal/$', views.intro_study_goal, name='intro_study_goal'),\n\n\turl(r'^study/method/examinees_status/$', views.study_method_examinees_status, name='study_method_examinees_status'),\n\turl(r'^study/method/collect_data/$', views.study_method_collect_data, name='study_method_collect_data'),\n\n\turl(r'^study/result/thesis/$', views.study_result_thesis, name='study_result_thesis'),\n\turl(r'^study/result/news_letter/$', views.study_result_news_letter, name='study_result_news_letter'),\n\turl(r'^study/result/detail/$', views.post_detail, name='study_result_detail'),\n\n\turl(r'^question/detail/$', views.post_detail, name='question_detail'),\n\turl(r'^question/write/$', views.question_write, name='question_write'),\n\turl(r'^question/notice/$', views.question_notice, name='question_notice'),\n\turl(r'^question/faq/$', views.question_faq, name='question_faq'),\n\turl(r'^question/inquiry/$', views.question_inquiry, name='question_inquiry'),\n\turl(r'^question/contact/$', views.question_contact, name='question_contact'),\n\n]\n", "sub_path": "user_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": 
"django.conf.urls.url", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "589945012", "text": "import json\nfrom future.utils import viewitems\n\nnodes = {'A':['B','C'],\n 'B':['D','E']}\n\njs = json.dumps(nodes)\nprint(nodes['A'])\nf = open(\"dict.json\",\"w\")\nf.write(js)\nf.close()\n\nfor (key, value) in viewitems(nodes):\n print(key, value)\n\nfor key in nodes:\n for value in nodes[key]:\n print(value)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "json.dumps", "line_number": 7, "usage_type": "call"}, {"api_name": "future.utils.viewitems", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "633957525", "text": "\"\"\" This module implements the analysis of the transformed binary image \"\"\"\nimport numpy as np\nimport cv2\n\n\nclass LaneDetector(object):\n def __init__(self, filter_len=30, polyfilter_len=20):\n # The length of the smoothing filter\n self._filter_len = filter_len\n self._polyfilter_len = polyfilter_len\n\n # Array for storing the last n detections of the first peaks\n self._detections = []\n\n # Array of storing the last n lane polys\n self._poly_detections = []\n\n def __add_to_poly_detections(self, polynomials):\n \"\"\"\n Helper function to push an element in filter sequence\n \"\"\"\n if len(self._poly_detections) >= self._polyfilter_len:\n # Shift all items if filter is filled\n for i in range(len(self._poly_detections) - 1):\n self._poly_detections[i] = self._poly_detections[i + 1]\n self._poly_detections[-1] = polynomials\n else:\n self._poly_detections.append(polynomials)\n\n def __get_mean_of_polynomials(self):\n \"\"\"\n Helper function to get the mean values from filter sequence\n \"\"\"\n if len(self._poly_detections) > 0:\n return np.mean([x[0] for x in self._poly_detections], axis=0), \\\n np.mean([x[1] for x in self._poly_detections], axis=0)\n\n return None, None\n\n def __add_to_detections(self, detection):\n \"\"\"\n Helper function to push an element in filter sequence\n \"\"\"\n if len(self._detections) >= self._filter_len:\n # Shift all items if filter is filled\n for i in range(len(self._detections)-1):\n self._detections[i] = self._detections[i + 1]\n self._detections[-1] = detection\n else:\n self._detections.append([detection[0], detection[1]])\n\n def __get_mean_of_detections(self):\n \"\"\"\n Helper function to get the mean values from filter sequence\n \"\"\"\n if len(self._detections) > 0:\n mean_values = [int(np.mean([x[0] for x in self._detections])),\n int(np.mean([x[1] for x in self._detections]))]\n return mean_values\n return None\n\n def find_lanes_hist_peaks_initial(self, bin_warped_img, signal_threshold=10000, signal_filter_len=30):\n \"\"\"\n This function will search for initial peak points in the bird-eye-view-warped binary image.\n :returns A list of peaks that correspond to potential lane lines.\n \"\"\"\n # Compute Histogram over the lower half of image\n col_sum = np.sum(bin_warped_img[bin_warped_img.shape[0] // 2:, :], axis=0)\n col_sum_bin = np.zeros_like(col_sum)\n\n # Signal filtering\n for i in range(col_sum.shape[0]):\n col_sum_bin[i] = 1 if np.max(col_sum[i:i + signal_filter_len]) > signal_threshold else 0\n\n # Detect Peaks\n peaks = []\n # A detected peak has an offset of filter-length / 2\n peak_offset = signal_filter_len // 2\n last_lowhigh_index = 0\n for i in range(col_sum.shape[0]):\n if i > 0 and col_sum_bin[i] > col_sum_bin[i - 
1]: # Going from LOW to HIGH - remember this event\n last_lowhigh_index = i\n if i > 0 and col_sum_bin[i] < col_sum_bin[i - 1]: # Going from HIGH to LOW - mean it with last LH for peak\n if last_lowhigh_index is None:\n raise ValueError('No LOW to HIGH transition detected for a peak.')\n peaks.append(((last_lowhigh_index + i) // 2) + peak_offset)\n last_lowhigh_index = None\n\n # Check for Signal at end of sequence\n if last_lowhigh_index is not None:\n peaks.append(((last_lowhigh_index + int(col_sum.shape[0])) // 2) + peak_offset)\n\n return peaks\n\n def find_lanes_hist_peaks_filter(self, bin_warped_img, peak_list, lane_seed_peak_threshold=75):\n \"\"\"\n This function takes a peak list as starting points for filtering these start points.\n It will look in self._detections for last seed-peaks for the search.\n \"\"\"\n # If there are peaks in the detections list, use them and look if a new peak is near them.\n # If a new peak is near enough, it is mean-ed with the last detection point.\n current_seed_peak_detection = [None, None]\n\n # Maybe we can pre-fill the current_seed_peak_detection\n if len(self._detections) > 0:\n # Yes, we have some older detections\n current_seed_peak_detection = self._detections[-1]\n # Check for new peaks\n for peak in peak_list:\n # get the last detected seed peaks and compare to new peaks\n for i, last_seed_peaks in enumerate(current_seed_peak_detection):\n if np.abs(last_seed_peaks - peak) <= lane_seed_peak_threshold:\n current_seed_peak_detection[i] = (last_seed_peaks + peak) // 2\n else:\n # Oh no, there are no pre-detections\n # Lets simply take the nearest peak to the middle, in the left half of the image.\n # Do the same for the right peak.\n mid_of_view = bin_warped_img.shape[1] // 2\n\n left_peaks = []\n right_peaks = []\n for peak in peak_list:\n abs_dist = np.abs(mid_of_view-peak)\n if peak > mid_of_view:\n right_peaks.append((peak, abs_dist))\n else:\n left_peaks.append((peak, abs_dist))\n\n # Abort if no peaks were found\n if len(left_peaks) <= 0 or len(right_peaks) <= 0:\n return None\n\n # Otherwise, we have our candidates!\n current_seed_peak_detection[0] = sorted(left_peaks, key=lambda x: x[1])[0][0]\n current_seed_peak_detection[1] = sorted(right_peaks, key=lambda x: x[1])[0][0]\n\n self.__add_to_detections(current_seed_peak_detection)\n current_seed_peak_detection = self.__get_mean_of_detections()\n\n return current_seed_peak_detection\n\n def find_lanes_hist_peaks_lane_positions(self,\n bin_warped_img,\n seed_peaks,\n intensity_threshold=0.1,\n shift_threshold=35,\n win_slide_size=(42, 42)):\n \"\"\"\n This function will detect the lane pixels from seed peak points.\n :param bin_warped_img: The warped binary image.\n :param seed_peaks: Exactly two peaks as seeds.\n :return: lane positions, coordinates of valid rectangles\n \"\"\"\n if seed_peaks is None:\n return None, None\n\n if len(seed_peaks) != 2:\n raise Exception('Expecting exactly 2 seed peaks.')\n\n abs_intensity_threshold = win_slide_size[0] * win_slide_size[1] * intensity_threshold\n\n # A sliding window will now start at the seed peaks to find local maxima.\n # This is the array with 2 arrays, containing the detected positions,\n # it is initialized with the bottom-seed peaks:\n lane_positions = [[(bin_warped_img.shape[0], seed_peaks[0])], [(bin_warped_img.shape[0], seed_peaks[1])]]\n\n for i, seed_peak in enumerate(seed_peaks):\n for height in reversed(range(0, bin_warped_img.shape[0] + win_slide_size[0], win_slide_size[0])):\n max_signal = -1\n max_width = -1\n for width in 
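find_lanes_hist_peaks_initial above thresholds a filtered column-sum histogram before extracting peak centres. A much smaller variant of the same idea, assuming one clear lane marking per image half, simply takes the argmax of each half of the column sums over the lower part of the warped image:

import numpy as np

def find_seed_peaks(bin_warped):
    # Column sums over the lower half of the warped binary image,
    # then the strongest column on each side of the centre.
    hist = np.sum(bin_warped[bin_warped.shape[0] // 2:, :], axis=0)
    mid = hist.shape[0] // 2
    return int(np.argmax(hist[:mid])), int(mid + np.argmax(hist[mid:]))

# Synthetic 720x1280 image with two vertical "lane" stripes.
img = np.zeros((720, 1280), dtype=np.uint8)
img[400:, 300:310] = 255
img[400:, 950:960] = 255
print(find_seed_peaks(img))  # (300, 950)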
range(seed_peak - shift_threshold, seed_peak):\n # look for the strongest signal in this horizontal slice\n non_zeros = np.count_nonzero(bin_warped_img[height:height+win_slide_size[0],\n width:width+win_slide_size[1]])\n if non_zeros > abs_intensity_threshold and non_zeros > max_signal:\n max_signal = non_zeros\n max_width = width\n\n if max_signal > 0:\n seed_peak = max_width + (win_slide_size[1] // 2)\n lane_positions[i].append([height, max_width + (win_slide_size[1] // 2)])\n\n return lane_positions\n\n def find_lanes_hist_peaks_lane_pixels(self, bin_warped_img, lane_positions, win_slide_size):\n # For every detected position, the underlying pixels are declared as the lane mask\n lane_pixels = [np.zeros_like(bin_warped_img),\n np.zeros_like(bin_warped_img)]\n\n for i, pos in enumerate(lane_positions):\n drawing_rects = np.zeros_like(bin_warped_img)\n for point in pos:\n pt1 = (point[1] - (win_slide_size[0] // 2), point[0] - (win_slide_size[0] // 2))\n pt2 = (point[1] + (win_slide_size[0] // 2), point[0] + (win_slide_size[0] // 2))\n cv2.rectangle(drawing_rects, pt1, pt2, 255, -1)\n\n lane_pixels[i][(drawing_rects > 0) & (bin_warped_img > 0)] = 255\n\n return lane_pixels[0], lane_pixels[1]\n\n def fit_poly(self, mask_left, mask_right):\n \"\"\"\n This function will fit the polys into the pixels of the lanes.\n :param mask_left: a mask of the left lane pixels.\n :param mask_right: a mask of the right lane pixels.\n :return: polynomials as array\n \"\"\"\n left_pxs = np.where(mask_left > 0)\n rigth_pxs = np.where(mask_right > 0)\n\n if len(rigth_pxs[0]) <= 0 or len(left_pxs[0]) <= 0:\n return None, None\n\n left_poly = np.polyfit(left_pxs[0], left_pxs[1], 2)\n right_poly = np.polyfit(rigth_pxs[0], rigth_pxs[1], 2)\n\n return left_poly, right_poly\n\n def draw_poly(self, img, poly):\n \"\"\"\n This function paints all points of the polynomial in the image.\n :param img: A grayscale image.\n :param poly: The polynomial.\n :return: Image with inpainted polynomial.\n \"\"\"\n if poly is None:\n return img\n\n ys = np.linspace(0, img.shape[0] - 1, img.shape[0])\n xs = poly[0] * ys ** 2 + poly[1] * ys + poly[2]\n for pt in zip(xs, ys):\n if int(pt[0]) < img.shape[1] and int(pt[1]) < img.shape[0] and int(pt[0]) >= 0 and int(pt[1]) > 0:\n img[int(pt[1]), int(pt[0])] = 255\n\n return img\n\n def filter_poly(self, poly_left, poly_right):\n # Push the current polynomials to filter sequence\n if poly_left is not None and poly_right is not None:\n self.__add_to_poly_detections([poly_left, poly_right])\n\n # Get Mean of last n polynomials\n return self.__get_mean_of_polynomials()\n\n def calc_radius_offset_poly(self, poly_left, poly_right, image_width, image_height):\n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30 / 720 # meters per pixel in y dimension\n xm_per_pix = 3.7 / 700 # meters per pixel in x dimension\n\n # Calculate the offset of the car to center\n left_pos = (poly_left[0] * image_height ** 2 + poly_left[1] * image_height + poly_left[2]) - (image_width / 2)\n right_pos = (poly_right[0] * image_height ** 2 + poly_right[1] * image_height + poly_right[2]) - (image_width / 2)\n\n # Check if vehicle is left or right from center\n if abs(left_pos) > abs(right_pos):\n offset = -abs(right_pos-abs(left_pos))*xm_per_pix\n else:\n offset = abs(right_pos-abs(left_pos))*xm_per_pix\n\n # Convert polys to world space by recalculating them\n ys = np.linspace(0, image_height, num=image_height)\n xs_left = []\n xs_right = []\n for y in ys:\n xs_left.append(poly_left[0] * y ** 2 
+ poly_left[1] * y + poly_left[2])\n xs_right.append(poly_right[0] * y ** 2 + poly_right[1] * y + poly_right[2])\n\n # Create hstack from points for opencv fillPoly\n pts_left = np.array([np.transpose(np.vstack([xs_left, ys]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([xs_right, ys])))])\n poly = np.hstack((pts_left, pts_right))\n\n # Fit new polynomials to x,y in world space\n poly_left_meters = np.polyfit(ys * ym_per_pix, np.array(xs_left) * xm_per_pix, 2)\n poly_right_meters = np.polyfit(ys * ym_per_pix, np.array(xs_right) * xm_per_pix, 2)\n\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2 * poly_left_meters[0] * image_height * ym_per_pix + poly_left_meters[1]) ** 2) ** 1.5) \\\n / np.absolute(2 * poly_left_meters[0])\n right_curverad = ((1 + (2 * poly_right_meters[0] * image_height * ym_per_pix + poly_right_meters[1]) ** 2)\n ** 1.5) / np.absolute(2 * poly_right_meters[0])\n\n radius = (left_curverad + right_curverad) / 2\n return radius, offset, poly\n\n def sanity_check_poly(self, poly_left, poly_right, t_A=None, t_B=0.2):\n \"\"\"\n This function performs a basic sanity check on the 2 polynomials of f(y) = Ay2 * By + C\n :param poly_left: The polynomial of the left lane.\n :param poly_right: The polynomial of the right lane.\n :param t_A: If the absolute difference of A_left and A_right is greater than t_A, None, None is returned.\n :param t_B: The same as t_A but for B_left and B_right\n :return: None, None if thresholds were exceeded.\n \"\"\"\n # Check if polys are None\n if poly_left is None or poly_right is None:\n return None, None\n\n abs_diffs = []\n for i in range(len(poly_left)):\n abs_diffs.append(int(abs(poly_left[i] - poly_right[i]) * 100) / 100.0)\n\n if t_A is not None and abs_diffs[0] > t_A:\n return None, None\n\n if t_B is not None and abs_diffs[1] > t_B:\n return None, None\n\n return poly_left, poly_right\n\n\n def find_lanes_hist_peaks(self, bin_warped_img):\n # Window size for detection and inpainting\n win_size = (42, 42)\n\n # Current polynomials for the lanes\n poly_left = None\n poly_right = None\n poly = None\n radius_of_curvature = -1\n offset_to_center = -1\n\n mask_left = np.zeros_like(bin_warped_img)\n mask_right = np.zeros_like(bin_warped_img)\n\n # First step is to identify histogram peaks in the histogram of sums of the lower image part\n bare_peaks = self.find_lanes_hist_peaks_initial(bin_warped_img)\n\n # Then these currently detected peaks are fed into a smoothing function that checks previous detections\n filt_peaks = self.find_lanes_hist_peaks_filter(bin_warped_img, bare_peaks)\n\n # With the filtered 2 peaks, the lanes are identified by using the sliding window approach\n lane_posit = self.find_lanes_hist_peaks_lane_positions(bin_warped_img, filt_peaks, intensity_threshold=0.05, win_slide_size=win_size)\n if lane_posit[0] is not None:\n # With the rough positions, the pixels are identified\n mask_left, mask_right = self.find_lanes_hist_peaks_lane_pixels(bin_warped_img, lane_posit, win_slide_size=win_size)\n\n if mask_left is not None and mask_right is not None:\n\n # Fit the polynomials into the mask pixels\n poly_left, poly_right = self.fit_poly(mask_left, mask_right)\n\n # Do a basic sanity check for polynomials\n poly_left, poly_right = self.sanity_check_poly(poly_left, poly_right)\n\n # Check if pipeline failed.\n new_polys = poly_left is not None and poly_right is not None\n\n # Filter the polynomials\n poly_left, poly_right = self.filter_poly(poly_left, poly_right)\n\n # Compute the radius of curvature 
and the position with respect to center\n if poly_left is not None and poly_right is not None:\n radius_of_curvature, offset_to_center, poly = self.calc_radius_offset_poly(poly_left,\n poly_right,\n bin_warped_img.shape[1],\n bin_warped_img.shape[0])\n\n # Prepare the image for the inpainted green lane\n image_with_lane = np.zeros(shape=(bin_warped_img.shape[0], bin_warped_img.shape[1], 3), dtype=np.uint8)\n if poly is not None:\n color = (0, 255, 0) if new_polys else (0, 255, 196)\n cv2.fillPoly(image_with_lane, np.int_([poly]), color)\n\n image_with_polys = np.zeros_like(bin_warped_img)\n image_with_polys = self.draw_poly(image_with_polys, poly_left)\n image_with_polys = self.draw_poly(image_with_polys, poly_right)\n image_with_polys = cv2.dilate(image_with_polys, np.ones((3, 3), np.uint8), 1)\n\n # A nice looking image is created here\n inpainted_lanes = np.zeros(shape=(bin_warped_img.shape[0], bin_warped_img.shape[1], 3), dtype=np.uint8)\n inpainted_lanes[:, :, :][bin_warped_img > 0] = 255\n inpainted_lanes[:, :, :][(mask_left > 0) | (mask_right > 0)] = 0\n inpainted_lanes[:, :, 2][mask_left > 0] = 255\n inpainted_lanes[:, :, 0][mask_right > 0] = 255\n inpainted_lanes[:, :, :][image_with_polys > 0] = [0, 255, 255]\n\n for pos in lane_posit:\n if pos:\n for point in pos:\n pt1 = (point[1] - (win_size[0] // 2), point[0] - (win_size[0] // 2))\n pt2 = (point[1] + (win_size[0] // 2), point[0] + (win_size[0] // 2))\n cv2.rectangle(inpainted_lanes, pt1, pt2, (0, 128, 0), 1)\n\n return inpainted_lanes, image_with_lane, radius_of_curvature, offset_to_center\n\n\n", "sub_path": "packages/lane_detection.py", "file_name": "lane_detection.py", "file_ext": "py", "file_size_in_byte": 17830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "numpy.mean", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 270, "usage_type": "call"}, 
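calc_radius_offset_poly above evaluates the standard curvature formula R = (1 + (2Ay + B)^2)^(3/2) / |2A| for each lane polynomial after refitting in metre units. The formula on its own, applied to a synthetic quadratic (the look-ahead distance and coefficients are illustrative):

import numpy as np

def radius_of_curvature(poly, y_eval):
    # poly holds [A, B, C] for x = A*y**2 + B*y + C; evaluate R at y_eval.
    A, B = poly[0], poly[1]
    return (1 + (2 * A * y_eval + B) ** 2) ** 1.5 / np.absolute(2 * A)

ys = np.linspace(0, 30, 100)              # 30 m of look-ahead
xs = 0.002 * ys ** 2 + 0.1 * ys + 1.0     # synthetic lane centre line
poly = np.polyfit(ys, xs, 2)
print(radius_of_curvature(poly, ys[-1]))  # a few hundred metres for this gentle curve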
{"api_name": "numpy.flipud", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 360, "usage_type": "attribute"}, {"api_name": "cv2.fillPoly", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 365, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 368, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 371, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 383, "usage_type": "call"}]} +{"seq_id": "163194454", "text": "# -*- coding: utf-8 -*-\n#\n# @author: Daemon Wang\n# Created on 2016-03-02\n#\nimport base64\nimport os\nimport random\nimport time\nimport datetime\nimport json\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\nimport pymongo\nimport traceback\nimport string\nimport hashlib\nimport urllib\nimport cgi\nimport math\nimport redis\nfrom concurrent import futures\nimport zipfile\nimport re\nimport ZhihuiSMB\nfrom ZhihuiSMB.libs.options import config\nimport uuid\nfrom ZhihuiSMB.libs import const\n\n\ndef get_root_path():\n return os.path.dirname(os.path.abspath(ZhihuiSMB.__file__))\n\n\ndef find_modules(modules_dir):\n try:\n return [f[:-3] for f in os.listdir(modules_dir)\n if not f.startswith('_') and f.endswith('.py')]\n except OSError:\n return []\n\n\ndef get_random_num(length, mode='string'):\n if mode == 'string':\n return ''.join([(string.ascii_letters + string.digits)[x] for x in random.sample(range(0, 62), length)])\n elif mode == 'number':\n return ''.join([(string.digits)[x] for x in random.sample(range(0, 10), length)])\n\n\ndef md5(str):\n m = hashlib.md5()\n m.update(str.encode())\n return m.hexdigest()\n\ndef generate_password(password,loginname):\n m = hashlib.md5()\n m.update((password+loginname).encode())\n res = m.hexdigest()\n return res\n\n\ndef get_uuid():\n return uuid.uuid1()\n\n\ndef get_current_time(format_type='datetime'):\n if format_type == 'datetime':\n format = '%Y-%m-%d %H:%M:%S'\n elif format_type == 'date':\n format = '%Y-%m-%d'\n elif format_type == 'datetime2':\n format = '%Y-%m-%d %H:%M:%S.%f'\n return datetime.datetime.now().strftime(format)[:-3]\n elif format_type == 'directory_date':\n format = '%Y/%m/%d'\n return datetime.datetime.now().strftime(format)\n\n\ndef timestamp_datetime(value, format_type='datetime'):\n if format_type == 'datetime':\n 
format = '%Y-%m-%d %H:%M:%S'\n elif format_type == 'date':\n format = '%Y-%m-%d'\n # value为传入的值为时间戳(整形),如:1332888820\n value = value + 8 * 60 * 60\n value = time.localtime(value)\n ## 经过localtime转换后变成\n ## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=0)\n # 最后再经过strftime函数转换为正常日期格式。\n dt = time.strftime(format, value)\n return dt\n\n\ndef datetime_timestamp(dt):\n # dt为字符串\n # 中间过程,一般都需要将字符串转化为时间数组\n try:\n time.strptime(dt, '%Y-%m-%d %H:%M:%S')\n ## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=-1)\n # 将\"2012-03-28 06:53:40\"转化为时间戳\n s = time.mktime(time.strptime(dt, '%Y-%m-%d %H:%M:%S'))\n except ValueError:\n time.strptime(dt, '%Y-%m-%d')\n ## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=-1)\n # 将\"2012-03-28 06:53:40\"转化为时间戳\n s = time.mktime(time.strptime(dt, '%Y-%m-%d'))\n return int(s)\n\n\n# 时间字符串转datetime\ndef strtodatetime(datestr, format):\n return datetime.datetime.strptime(datestr, format)\n\n\n# 获取本地时间戳\ndef get_local_timestamp(type='sec'):\n return int(time.time())\n\n\n# 获取当前utc时间\ndef get_utc_now():\n return datetime.datetime.utcnow()\n\n\n# 获取当前时间\ndef get_now():\n return datetime.datetime.now()\n\n\n# 生成0000-00-00时间\ndef get_default_time():\n return datetime.datetime(1, 1, 1, 0, 0, 0)\n\n\n# 生成objectid\ndef create_objectid(str=None):\n try:\n object_id = ObjectId(str)\n except:\n object_id = ''\n return object_id\n\n\n# 将objectid 转换为string字符串\ndef objectid_str(objectid):\n return json.loads(dumps(objectid))['$oid']\n\n\n# 格式化错误信息\ndef format_error():\n return traceback.format_exc()\n\n\ndef str_md5_hex(val):\n return hashlib.md5(val).hexdigest()\n\n\ndef html_encode(str):\n return cgi.escape(str)\n\n\n# 计算分页信息\ndef count_page(length, page, page_size=15, page_show=10):\n if page is None:\n page = 1\n if page_size is None:\n page_size = 15\n\n page = int(page)\n page_size = int(page_size)\n length = int(length)\n if length == 0:\n return {\"enable\": False,\n \"page_size\": page_size,\n \"skip\": 0}\n max_page = int(math.ceil(float(length) / page_size))\n page_num = int(math.ceil(float(page) / page_show))\n pages = list(range(1, max_page + 1)[((page_num - 1) * page_show):(page_num * page_show)])\n skip = (page - 1) * page_size\n if page >= max_page:\n has_more = False\n else:\n has_more = True\n pager = {\n \"page_size\": page_size,\n \"max_page\": max_page,\n \"pages\": pages,\n \"page_num\": page_num,\n \"skip\": skip,\n \"page\": page,\n \"enable\": True,\n \"has_more\": has_more,\n \"length\": length\n }\n return pager\n\n\n# 将两个list合成字典\ndef list_to_dict(list1, list2):\n return dict(zip(list1[::], list2))\n\n\n# 获取请求Host\ndef get_request_host(request):\n return request.headers.get_list('HOST')[0]\n\n\ndef zip_folder(foldername, zip_name):\n filelist = []\n if os.path.isfile(foldername):\n filelist.append(foldername)\n else:\n for root, dirs, files in os.walk(foldername):\n for name in files:\n filelist.append(os.path.join(root, name))\n\n zf = zipfile.ZipFile(zip_name, \"w\", zipfile.zlib.DEFLATED)\n for tar in filelist:\n arcname = tar[len(foldername):]\n # print arcname\n zf.write(tar, arcname)\n zf.close()\n\n\ndef get_concurrent_pool():\n return futures.ThreadPoolExecutor(4)\n\n\n# def MongoDB():\n# # 建立连接\n# client = pymongo.MongoClient(config.li.mongo[\"host\"], options.mongo[\"port\"])\n# db = client[options.mongo[\"database\"]]\n# if 
options.mongo_auth:\n# db.authenticate(options.mongo[\"user\"], options.mongo[\"password\"])\n# return db\n#\n#\n# def Redis():\n# pool = redis.ConnectionPool(host=options.redis['host'],\n# port=options.redis['port'],\n# db=options.redis['db'])\n# db = redis.StrictRedis(connection_pool=pool)\n# return db\n\n\ndef init_response_data():\n result = {\"success\": 1, \"return_code\": \"success\", \"error_msg\": \"\", \"data\": {}}\n return result\n\n\ndef reset_response_data(code, e=None):\n # print(format_error())\n result = init_response_data()\n if code == 1:\n result[\"return_code\"] = \"success\"\n elif code == -1:\n result[\"return_code\"] = \"token invalidate\"\n else:\n result[\"return_code\"] = e or \"error\"\n result[\"success\"] = code\n result[\"error_msg\"] = format_error()\n\n return result\n\n\ndef dump(str, filter=[]):\n result = None\n if isinstance(str, pymongo.cursor.Cursor) or isinstance(str, list) or isinstance(str,\n pymongo.command_cursor.CommandCursor):\n result = []\n for _s in str:\n if type(_s) == type({}):\n s = {}\n for (k, v) in _s.items():\n if k in filter:\n pass\n elif type(v) == type(ObjectId()):\n s[k] = json.loads(dumps(v))['$oid']\n elif type(v) == type(datetime.datetime.utcnow()):\n # s[k] = v.strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\n s[k] = v.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n s[k] = v\n else:\n s = _s\n result.append(s)\n elif isinstance(str, dict):\n result = {}\n for (k, v) in str.items():\n if k in filter:\n pass\n elif type(v) == type(ObjectId()):\n result[k] = json.loads(dumps(v))['$oid']\n elif type(v) == type(datetime.datetime.utcnow()):\n result[k] = v.strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\n else:\n result[k] = v\n elif str is None:\n result = None\n elif len(str) == 0:\n result = str\n return result\n\n\ndef check_code(checkcode_coll, str, code, type=\"mobile\"):\n # 测试用验证码 888888\n if code == \"888888\":\n return\n if type == \"mobile\":\n checkcode = checkcode_coll.find_one({\"mobile\": str, \"enable_flag\": True})\n # 验证码的有效时间\n if checkcode:\n if code.upper() != checkcode[\"code\"].upper():\n raise Exception(\"填写验证码有误\")\n elif checkcode[\"add_time\"] <= datetime.datetime.now() - datetime.timedelta(minutes=10):\n raise Exception(\"手机验证码过期!\")\n checkcode[\"enable_flag\"] = False\n checkcode_coll.save(checkcode)\n else:\n raise Exception(\"获取手机验证码失败\")\n elif type == \"email\":\n checkcode = checkcode_coll.find_one({\"email\": str, \"enable_flag\": True})\n if checkcode:\n if code.upper() != checkcode[\"code\"].upper():\n raise Exception(\"填写验证码有误!\")\n elif checkcode[\"add_time\"] <= datetime.datetime.now() - datetime.timedelta(hours=30):\n raise Exception(\"邮箱验证码过期!\")\n checkcode[\"enable_flag\"] = False\n checkcode_coll.save(checkcode)\n else:\n raise Exception(\"获取邮箱验证码失败\")\n else:\n raise Exception(\"验证码类型错误!\")\n\n\n# 创建目录\ndef mkdir(path):\n path = path.strip()\n isExist = os.path.exists(path)\n if not isExist:\n os.makedirs(path)\n else:\n pass\n return path\n\n\ndef save_file(path, file_name, data):\n if data == None:\n return\n mkdir(path)\n if (not path.endswith(\"/\")):\n path = path + \"/\"\n file = open(path + file_name, \"wb\")\n file.write(data)\n file.flush()\n file.close()\n\n\ndef check_email(email):\n return re.match(\"^\\w+([-+.]\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*$\", email) is not None\n\n\ndef check_mobile(mobile):\n return re.match(\"^1\\d{10}$\", mobile) is not None\n\n\ndef is_chinese(string):\n pattern = re.compile(u'[\\u4e00-\\u9fa5]+')\n return pattern.search(string)\n\n\ndef str_to_img(uri, string, 
url=None):\n if url == None:\n url = '/static/ftp/image/' + uri\n\n # string = string.replace(\"data:image/jpeg;base64,\",\"\")\n # missing_padding = 4 - len(string) % 4\n # if missing_padding:\n # string += '=' * missing_padding\n img_data = base64.b64decode(string)\n path_url = get_root_path() + url\n if not os.path.exists(os.path.dirname(path_url)):\n os.makedirs(os.path.dirname(path_url))\n f = open(path_url, \"wb\")\n f.write(img_data)\n f.close()\n return url\n\n\ndef compare_time(dt1, dt2):\n if dt1 > dt2:\n raise Exception(\"开始时间不能大于结束时间!\")\n else:\n return True\n\ndef get_consts(consts_name,value):\n key = value['value']\n try:\n consts_list = const.get(consts_name)\n for c in consts_list:\n if c[1] == key:\n return c[0]\n raise ValueError(\"不存在相应的键值[%s]\"%value)\n except ValueError as e:\n raise Exception(e)\n except:\n raise Exception(\"常量[%s]不存在\"%consts_name)\n\ndef get_status_code(err=None):\n if str(err) == \"Permission denied\":\n return 403\n else:\n return 401\n\n", "sub_path": "libs/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 11576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "os.path.dirname", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 32, "usage_type": "call"}, {"api_name": "ZhihuiSMB.__file__", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 45, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 45, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 45, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 47, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 47, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 51, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 56, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 86, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 90, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 101, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 101, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 103, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 106, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 127, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 127, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 138, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 146, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 151, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 155, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 159, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 176, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 217, "usage_type": "call"}, {"api_name": "zipfile.zlib", "line_number": 217, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 226, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 226, "usage_type": "name"}, {"api_name": "pymongo.cursor", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pymongo.command_cursor", "line_number": 269, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 277, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 278, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 278, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 279, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 279, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 292, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 293, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 293, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 294, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 294, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 315, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 315, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 315, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 326, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 326, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 341, "usage_type": "call"}, {"api_name": "re.match", "line_number": 360, "usage_type": "call"}, {"api_name": "re.match", "line_number": 364, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 368, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 380, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 382, "usage_type": "call"}, {"api_name": "os.path", "line_number": 382, "usage_type": "attribute"}, {"api_name": "os.path.dirname", 
"line_number": 382, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 383, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 383, "usage_type": "call"}, {"api_name": "os.path", "line_number": 383, "usage_type": "attribute"}, {"api_name": "ZhihuiSMB.libs.const.get", "line_number": 399, "usage_type": "call"}, {"api_name": "ZhihuiSMB.libs.const", "line_number": 399, "usage_type": "name"}]} +{"seq_id": "161574004", "text": "\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import User, Trip\nfrom django.http import JsonResponse\n\n\ndef json_payload(response):\n data = list(Trip.objects.values())\n data_new = Trip.objects.all()\n carter = {\n 'name': '',\n 'age': '',\n 'zip': '',\n }\n\n for key in carter:\n print(key)\n carter[key] = 1\n print(carter)\n # for item in carter:\n # print(item.destination)\n return JsonResponse(carter, safe=False)\n\n\ndef index(request):\n\n context = {\n 'all_trips': Trip.objects.all()\n }\n\n return render(request, 'spark_app/index.html', context)\n\n\ndef login(request):\n return render(request, 'spark_app/login.html')\n\n\ndef logout(request):\n del request.session['user_id']\n return redirect(index)\n\n\ndef process_login(request):\n pending_user = User.objects.validate_login(request.POST)\n if pending_user['status']: # if user is logged in\n request.session['user_id'] = pending_user['user_id']\n return redirect(dashboard)\n else:\n for error in pending_user['errors']:\n messages.error(request, error)\n return redirect('/')\n\n\ndef process_registration(request):\n # Validate User\n pending_user = User.objects.validate_registration(request.POST)\n if pending_user['status']: # if user is logged in\n request.session['user_id'] = pending_user['user_id']\n return redirect(dashboard)\n else:\n for error in pending_user['errors']:\n messages.error(request, error)\n print(error)\n return redirect(register)\n\n\ndef register(request):\n return render(request, 'spark_app/register.html')\n\n# Trips\n\n\ndef create_trip(request):\n return render(request, 'spark_app/create_trip.html')\n\n\ndef process_create_trip(request):\n\n new_trip = Trip.objects.create_trip(\n request.POST, request.session['user_id'])\n\n return redirect(dashboard)\n\n\ndef join_trip(request, trip_id):\n\n user_trip = Trip.objects.join(trip_id, request.session['user_id'])\n\n return redirect(dashboard)\n\n\n# Dashboard\n\ndef dashboard(request):\n\n context = {\n \"all_trips\": Trip.objects.all(),\n \"user_trips\": User.objects.get(id=request.session['user_id']).created_trips.all()\n }\n return render(request, 'spark_app/dashboard.html', context)\n", "sub_path": "apps/spark_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "models.Trip.objects.values", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Trip.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Trip.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Trip.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 10, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Trip.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": 
"models.Trip.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "models.User.objects.validate_login", "line_number": 45, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "models.User.objects.validate_registration", "line_number": 57, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 63, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Trip.objects.create_trip", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Trip.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 80, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Trip.objects.join", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Trip.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Trip.objects.all", "line_number": 98, "usage_type": "call"}, {"api_name": "models.Trip.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.Trip", "line_number": 98, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 99, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "507018476", "text": "# pylint: skip-file\nfrom django.db import models\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\ndef ticket_directory_path(instance, filename):\n return 'ticket-images/ticket_{0}_images/image_{1}'.format(instance.id, filename)\n\ndef response_ticket_directory_path(instance, filename):\n return 'ticket-images/ticket_{0}_images/response_image_{1}'.format(instance.ticket.id, filename)\n\n# Create your models here.\nclass Management(models.Model):\n \"\"\"Comments\"\"\"\n\n name = models.CharField(\n \"Nombre de la Direccion\",\n max_length=100,\n )\n \n 
management_chief = models.OneToOneField(\n 'Profile',\n related_name='+',\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n verbose_name=\"Encargado de la Dirección\",\n )\n\n def __str__(self):\n return self.name\n\nclass Department(models.Model):\n \"\"\"Comments\"\"\"\n\n name = models.CharField(\n \"Nombre del Departamento\",\n max_length=100,\n )\n\n department_chief = models.OneToOneField(\n 'Profile',\n related_name='+',\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n verbose_name=\"Encargado del Departamento\",\n )\n \n management = models.ForeignKey(\n Management,\n on_delete=models.CASCADE,\n verbose_name=\"Dirección a la que Pertenece\",\n )\n\n def __str__(self):\n return self.name + \" -> \" + self.management.name \n\nclass Job_Titles(models.Model):\n \"\"\"Comments\"\"\"\n\n IMPORTANCE = (\n (3,'Alto'),\n (2,'Medio'),\n (1,'Bajo'),\n )\n\n job_title = models.CharField(\n \"Cargo\",\n max_length=100,\n )\n\n importance = models.IntegerField(\n \"Importancia\",\n choices=IMPORTANCE,\n )\n\n class Meta:\n verbose_name = \"Job Title\"\n verbose_name_plural = \"Job Titles\"\n\n def __str__(self):\n return self.job_title\n\nDEFAULT_RANK_ID = 1\nclass Profile(models.Model):\n \"\"\"Comments\"\"\"\n \n RANKS = (\n (4,'Administrador'),\n (3,'Equipo'),\n (2,'Supervisor'),\n (1,'Usuario'),\n )\n\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n )\n\n department = models.ForeignKey(\n Department,\n on_delete=models.CASCADE,\n verbose_name=\"Departamento al que Pertenece\",\n )\n\n job_title = models.ForeignKey(\n Job_Titles,\n on_delete=models.CASCADE,\n verbose_name=\"Cargo dentro de la Organización\",\n )\n\n rank = models.IntegerField(\n \"Rango en El Sistema\",\n choices=RANKS,\n default=DEFAULT_RANK_ID,\n )\n\n avatar = models.ImageField(\n \"Imagen del Perfil\",\n max_length=250,\n null=True,\n blank=True,\n )\n \n def is_worker(self):\n if self.rank in [3,4]:\n return True\n else:\n return False\n \n def is_superviser(self):\n departments = list(Department.objects.all())\n x = False\n d = None\n for department in departments:\n if self.user == department.department_chief.user:\n x = department\n return x \n \n def __str__(self):\n return self.user.get_full_name()\n\nclass Description(models.Model):\n \"\"\"Comments\"\"\"\n\n IMPORTANCE = (\n (3,'Alta'),\n (2,'Media'),\n (1,'Baja'),\n )\n\n description = models.CharField(\n \"Descripción\",\n max_length=250,\n )\n\n importance = models.IntegerField(\n \"Importancia\",\n choices=IMPORTANCE,\n )\n\n department = models.ForeignKey(\n Department,\n on_delete=models.CASCADE,\n verbose_name=\"Departamento al que Pertenece\",\n )\n\n def __str__(self):\n return self.description\n\nDEFAULT_STATUS = 1\nDEFAULT_PRIORITY = 0\nclass Ticket(models.Model):\n \"\"\"Comments\"\"\"\n\n TICKET_STATUS = (\n (1,'Abierto'),\n (2,'En Proceso'),\n (3,'Cerrado'),\n (4,'Rechazado'),\n )\n\n TICKET_PRIORITY = (\n (0,'Sin Asignar'),\n (1,'Baja'),\n (2,'Media'),\n (3,'Alta'),\n )\n\n title = models.CharField(\n \"Titulo del Ticket\",\n max_length=100,\n )\n\n content = models.TextField(\n \"Contenido del Ticket\",\n max_length=1000,\n )\n\n created = models.DateTimeField(\n \"Momento en el que fue Creado\",\n auto_now_add=True,\n )\n\n created_by = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n verbose_name=\"Creador del Ticket\",\n )\n\n status = models.IntegerField(\n \"Estatus del Ticket\",\n choices=TICKET_STATUS,\n default=DEFAULT_STATUS,\n )\n\n priority = models.IntegerField(\n \"Prioridad del Ticket\",\n 
choices=TICKET_PRIORITY,\n default=DEFAULT_PRIORITY,\n )\n\n description = models.ForeignKey(\n Description,\n on_delete=models.CASCADE,\n verbose_name=\"Descripción del Ticket\",\n )\n\n assigned_to = models.ForeignKey(\n Profile,\n related_name='+',\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n )\n\n image_file = models.ImageField(\n \"Imagen\",\n upload_to=ticket_directory_path,\n max_length=250,\n null=True,\n blank=True,\n )\n \n supervised = models.BooleanField(\n \"Fue Supervisado\",\n default=False,\n )\n\n deleted = models.BooleanField(\n \"Eliminar Ticket\",\n default=False,\n )\n \n # Model Save override \n def save(self, *args, **kwargs):\n if self.id is None:\n saved_image = self.image_file\n self.image_file = None\n super(Ticket, self).save(*args, **kwargs)\n self.image_file = saved_image\n if 'force_insert' in kwargs:\n kwargs.pop('force_insert')\n\n super(Ticket, self).save(*args, **kwargs)\n \n def is_supervised(self):\n if self.supervised:\n return True\n else:\n return False\n \n def _already_supervised(self, *args, **kwargs):\n self.supervised = True\n return super(Ticket, self).save(*args,**kwargs)\n \n def _change_status_to(self,new_status,*args, **kwargs):\n self.status = new_status\n return super(Ticket, self).save(*args,**kwargs)\n\n def _remove(self,*args, **kwargs):\n self.deleted = True\n self.status = 3\n return super(Ticket, self).save(*args,**kwargs)\n\n def _assign_priority(self,*args,**kwargs):\n prio_job = self.created_by.job_title.importance\n prio_des = self.description.importance\n imp = prio_job + prio_des\n\n if imp > 4:\n self.priority = 3\n elif imp > 2:\n self.priority = 2\n else:\n self.priority = 1\n\n return super(Ticket, self).save(*args,**kwargs)\n\n def _assign_ticket(self,*args,**kwargs):\n staff = Profile.objects.filter(rank__in=[3,4],department=self.description.department)\n band = True\n winner = dict()\n\n for worker in staff:\n #Filtramos por Trabajador, por el Tipo de Prioridad y El Estatus\n worker_tickets = Ticket.objects.filter(assigned_to=worker.id,\n priority=self.priority,\n status__in=[1,2],\n deleted=0)\n how_many = len(worker_tickets)\n\n if band:\n winner['worker'] = worker\n winner['how_many'] = how_many\n band = False \n elif how_many < winner['how_many']:\n winner['worker'] = worker\n winner['how_many'] = how_many\n\n self.assigned_to = winner['worker']\n return super(Ticket, self).save(*args,**kwargs)\n \n def _re_assign_ticket(self,id_worker,*args,**kwargs):\n worker = Profile.objects.get(id=id_worker)\n self.assigned_to = worker\n return super(Ticket, self).save(*args,**kwargs)\n \n def __str__(self):\n return self.title\n\nclass Response(models.Model):\n \"\"\"Comments\"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n on_delete=models.CASCADE,\n verbose_name=\"Ticket Asociado\",\n )\n\n user = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n verbose_name=\"Usuario que Responde\",\n )\n\n response = models.TextField(\n \"Contenido de la Respuesta\",\n max_length=1000,\n )\n\n image_file = models.ImageField(\n \"Imagen\",\n upload_to=response_ticket_directory_path,\n max_length=250,\n null=True,\n blank=True,\n )\n\nclass Ticket_Actions(models.Model):\n \"\"\"Comments\"\"\"\n\n TICKET_ACTIONS = (\n (1,'Abierto'),\n (2,'En Proceso'),\n (3,'Cerrado'),\n (4,'Rechazado')\n )\n\n ticket = models.ForeignKey(\n Ticket,\n on_delete=models.CASCADE,\n verbose_name=\"Ticket\",\n )\n\n action_user = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n verbose_name=\"Usuario que realiza la Acción\",\n 
)\n\n date = models.DateTimeField(\n \"Fecha de la Acción\",\n auto_now_add=True,\n )\n\n action = models.IntegerField(\n \"Acción del Ticket\",\n choices=TICKET_ACTIONS,\n )\n\n class Meta:\n verbose_name = \"Ticket Action\"\n verbose_name_plural = \"Ticket Actions\"\n\nclass Subscribe(models.Model):\n \"\"\"Comments\"\"\"\n\n ticket = models.ForeignKey(\n Ticket,\n on_delete=models.CASCADE,\n verbose_name=\"Ticket\",\n )\n\n user = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n verbose_name=\"Subscriptor\",\n )\n\n class Meta:\n verbose_name = \"Subscriber\"\n verbose_name_plural = \"Subscribers\"\n\n def __str__(self):\n return self.user.get_full_name() + \" - \" + self.ticket.title", "sub_path": "simpgo_app/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 9945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.db.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 86, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 96, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 97, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 98, "usage_type": "attribute"}, {"api_name": 
"django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 101, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 119, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 119, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 144, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 144, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 153, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 158, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 158, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 163, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 163, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 165, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 165, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 174, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 174, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 191, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 191, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 196, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 196, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 201, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 201, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 206, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 206, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 208, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 208, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 212, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 212, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 218, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 218, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 224, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 224, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 226, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 226, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", 
"line_number": 230, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 230, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 233, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 233, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 238, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 238, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 246, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 246, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 251, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 251, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 333, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 333, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 336, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 336, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 338, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 338, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 342, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 342, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 344, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 344, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 348, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 348, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 353, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 353, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 361, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 361, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 371, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 371, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 373, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 373, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 377, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 377, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 379, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 379, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 383, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 383, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 388, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 388, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 397, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 397, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 400, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 400, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 402, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 402, "usage_type": 
"name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 406, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 406, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 408, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 408, "usage_type": "name"}]} +{"seq_id": "585815245", "text": "# Modules\nimport discord\nimport inspect\nimport re\n\n# notk-bot\nimport Error\nimport Logging as log\nimport LoggingDiscord as dlog\n\nfrom Config import cfg\nfrom DiscordFacsimilies import AuthorStubbed\nfrom DiscordFacsimilies import ContextStubbed\n\nclass GuildBot:\n def __init__(self, bot):\n self.bot = bot\n self.channelBot = None\n self.channelLog = None\n self.roleAmongUs = None\n\n async def setup(self, guild):\n roleMod = None\n\n ctx = ContextStubbed(guild, AuthorStubbed(guild.name))\n\n dlog.debug(ctx, \"Starting {} (before channel located)\".format(__name__))\n\n # Get version information from file\n versionStr=\"\"\n versionPath = 'VERSION'\n versionFile = open(versionPath, 'r')\n try:\n versionStr = versionFile.readline().strip()\n if len(versionStr) < 3: # M.m\n await err(ctx, \"Could not read version information from file: '{}'\".format(versionPath))\n except:\n dlog.serverError(ctx, \"Could not read version information from file: '{}'\".format(versionPath))\n raise\n finally:\n versionFile.close()\n dlog.serverInfo(ctx, \"Version: {}\".format(versionStr))\n\n # Get release notes information from file\n releaseNotes = {}\n releaseNotesPath = 'RELEASE_NOTES'\n releaseNotesFile = open(releaseNotesPath, 'r')\n try:\n rawLine = \"none\"\n while rawLine:\n rawLine = releaseNotesFile.readline()\n line = rawLine.strip()\n match = re.search(r'^([A-Z ]+)$', line)\n if match:\n releaseNotesSection = match.group(1)\n releaseNotes[releaseNotesSection] = \"\"\n elif rawLine:\n releaseNotes[releaseNotesSection] += rawLine\n except Exception as e:\n dlog.serverError(ctx, \"Could not read release notes from file: '{}'\".format(releaseNotesPath))\n raise\n finally:\n releaseNotesFile.close()\n for key in releaseNotes:\n releaseNotes[releaseNotesSection].strip()\n dlog.serverInfo(ctx, \"Release Notes:\\n{}\".format(releaseNotes))\n\n # Check for existing channels\n for channel in guild.channels:\n if channel.name == cfg.cBotChannelName:\n self.channelBot = channel\n elif channel.name == cfg.cLogChannelName:\n self.channelLog = channel\n else:\n continue\n dlog.debug(ctx, 'Found: {}'.format(channel.mention))\n\n # TODO delete\n # if bool(self.channelBot):\n # await self.channelBot.delete(reason=\"pre-setup\")\n # guildBot.channelBot = None\n\n # Create the main bot channel if necessary\n if not self.channelLog:\n dlog.debug(ctx, 'Creating {} log channel: `#{}`'.format(self.bot.user.mention, cfg.cLogChannelName))\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(send_messages=False),\n guild.me: discord.PermissionOverwrite(\\\n manage_messages=True,\\\n read_messages=True,\\\n send_messages=True)\n }\n self.channelLog = await guild.create_text_channel(\\\n name=cfg.cLogChannelName,\\\n overwrites=overwrites,\\\n topic=\"NOTK Bot Log\",\\\n reason=\"Need a place to put logs\")\n self.info(ctx, 'Created {} log channel: `#{}`'.format(self.bot.user.mention, self.channelLog.mention))\n\n await self.info(ctx, \"Starting {}\".format(__name__))\n\n # TODO delete\n # for role in ctx.guild.roles:\n # if role.name == cfg.cAmongUsRoleName:\n # await role.delete(reason=\"cleanup\")\n\n # Check for existing roles\n 
modNames = []\n for role in guild.roles:\n modNames.append(role.name)\n if (role.name.lower() == cfg.cRoleModPrefix.lower()) |\\\n ((not roleMod) &\\\n (role.name.lower().startswith(cfg.cRoleModPrefix.lower()) |\\\n (cfg.cRoleModSubstring.lower() in role.name.lower()))):\n roleMod = role\n elif role.name.lower() == cfg.cAmongUsRoleName.lower():\n self.roleAmongUs = role\n else:\n continue\n # DO NOT mention the role. We don't need to tag all the players in this log message, lol.\n await self.info(ctx, 'Found: `@{}`'.format(role.name))\n dlog.serverInfo(ctx, 'Roles: `{}`'.format('`, `@'.join(modNames)))\n\n if not roleMod:\n dlog.serverWarn(ctx, \"{} role not found.\".format(cfg.cRoleModPrefix))\n\n # Create the role\n if not self.roleAmongUs:\n await self.info(ctx, 'Creating `@{}`'.format(cfg.cAmongUsRoleName))\n self.roleAmongUs = await guild.create_role(\\\n name=cfg.cAmongUsRoleName,\\\n mentionable=True,\\\n hoist=False,\\\n reason=\"Allow users to easily ping everyone interested in playing Among Us.\")\n #colour=Colour.gold,\\\n\n # Check the existing pinned messages and parse data from them\n amongUsRoleMessage = None\n releaseNotesMessage = None\n if self.channelBot:\n # FIXME Parse all history until we find the instructional message\n for message in await self.channelBot.history(limit=1000).flatten():\n if message.author.id == self.bot.user.id:\n # dlog.serverInfo(ctx, 'Found message in {}: [{}]'.format(\\\n # self.channelBot.mention,\\\n # message.content))\n if cfg.cInstructionalLine in message.content.partition('\\n')[0]:\n amongUsRoleMessage = message\n dlog.serverInfo(ctx, 'Found {} instructional message in {}: {}'.format(\\\n message.author.mention,\\\n self.channelBot.mention,\\\n message.jump_url))\n elif message.content.startswith(cfg.cReleaseNotes):\n releaseNotesMessage = message\n dlog.serverInfo(ctx, 'Found {} release notes message in {}: {}'.format(\\\n message.author.mention,\\\n self.channelBot.mention,\\\n message.jump_url))\n\n # Create main bot channel\n if not self.channelBot:\n dlog.debug(ctx, 'Creating {} channel: `#{}`'.format(self.bot.user.mention, cfg.cBotChannelName))\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(send_messages=False),\n guild.me: discord.PermissionOverwrite(\\\n manage_messages=True,\\\n read_messages=True,\\\n send_messages=True)\n }\n self.channelBot = await guild.create_text_channel(\\\n name=cfg.cBotChannelName,\\\n overwrites=overwrites,\\\n topic=\"NOTK Bot\",\\\n reason=\"Need a place to put our instructional message and send join/leave notifications\")\n await self.info(ctx, 'Created {} channel: {}'.format(self.bot.user.mention, self.channelBot.mention))\n\n await self.channelBot.send(\\\n content=\"{}{} has added support for the Among Us player group via the {} role.\".format(\\\n roleMod.mention + \", \" if roleMod else \"\",\\\n self.bot.user.mention,\\\n self.roleAmongUs.mention))\n\n releaseNotesLatestSection = \"\"\n if releaseNotes[cfg.cExternalChanges]:\n releaseNotesLatestSection = \"Version {}:\\n{}\".format(\\\n versionStr,\\\n releaseNotes[cfg.cExternalChanges])\n releaseNotesLatestSection\n\n # Handle release notes pinned message\n oldVersionStr = None\n releaseNotesSections = []\n index = -1\n if releaseNotesMessage:\n for line in releaseNotesMessage.content.splitlines():\n match = re.search(r'^Version ([0-9]+\\.[0-9]+):$', line)\n if match:\n if not oldVersionStr:\n oldVersionStr = match.group(1)\n releaseNotesSections.append(line + \"\\n\")\n index += 1\n elif index >= 0:\n 
releaseNotesSections[index] += line + \"\\n\"\n if not oldVersionStr:\n oldVersionStr = \"0.0\"\n\n if len(releaseNotesSections):\n if versionStr == oldVersionStr:\n releaseNotesSections[0] = releaseNotesLatestSection\n else:\n releaseNotesSections.insert(0, releaseNotesLatestSection)\n else:\n releaseNotesSections = [ releaseNotesLatestSection ]\n releaseNotesMessageText = \"{}\\n\\n{}\".format(cfg.cReleaseNotes, \"\".join(releaseNotesSections))\n\n if not releaseNotesMessage:\n await self.info(ctx, 'Sending `@{}` release notes message'.format(cfg.cAmongUsRoleName))\n releaseNotesMessage = await self.channelBot.send(content=releaseNotesMessageText)\n elif releaseNotesMessage.content == releaseNotesMessageText:\n # This should indicate that this is a simple restart.\n dlog.serverInfo(ctx, 'Found up-to-date {} release notes message in {}: {}'.format(\\\n releaseNotesMessage.author.mention,\\\n self.channelBot.mention,\\\n releaseNotesMessage.jump_url))\n else:\n await self.info(ctx, 'Updating old {} release notes message in {}: {}'.format(\\\n releaseNotesMessage.author.mention,\\\n self.channelBot.mention,\\\n releaseNotesMessage.jump_url))\n await releaseNotesMessage.edit(content=releaseNotesMessageText)\n if (versionStr != oldVersionStr):\n await self.channelBot.send(content=\"{}{} has been updated!\\n{}\".format(\\\n roleMod.mention + \", \" if roleMod else \"\",\\\n self.bot.user.mention,\\\n releaseNotesSections[0]))\n if releaseNotesMessage.pinned:\n await self.info(ctx, '`@{}` release notes message already pinned.'.format(cfg.cAmongUsRoleName))\n else:\n await self.info(ctx, 'Pinning `@{}` release notes message'.format(cfg.cAmongUsRoleName))\n await releaseNotesMessage.pin(\\\n reason=\"The `@{}` release notes message will get buried if it isn't pinned\".format(cfg.kBotName))\n\n # Handle instructional pinned message\n amongUsRoleMessageText = \\\n \"\"\"{}\nType `{}` in any public channel to be notified about NOTK Among Us game sessions.\nType `{}` in any public channel if you no longer want to be notified.\n{}\nTag the `{}` role to ping all Among Us players like so: {}\nI recommend muting the {} channel; it is only for logging purposes and will be very noisy.\"\"\".format(\\\n cfg.cInstructionalLine,\\\n cfg.cAmongUsJoinRequestMessageText,\\\n cfg.cAmongUsLeaveRequestMessageText,\\\n cfg.cAmongUsSendGameNotificationText,\\\n cfg.cAmongUsRoleName,\\\n self.roleAmongUs.mention,\\\n self.channelLog.mention)\n if not amongUsRoleMessage:\n await self.info(ctx, 'Sending `@{}` instructional message'.format(cfg.cAmongUsRoleName))\n amongUsRoleMessage = await self.channelBot.send(content=amongUsRoleMessageText)\n elif amongUsRoleMessage.content == amongUsRoleMessageText:\n # This should indicate that this is a simple restart.\n dlog.serverInfo(ctx, 'Found up-to-date {} instructional message in {}: {}'.format(\\\n amongUsRoleMessage.author.mention,\\\n self.channelBot.mention,\\\n amongUsRoleMessage.jump_url))\n else:\n await self.info(ctx, 'Updating old {} instructional message in {}: {}'.format(\\\n amongUsRoleMessage.author.mention,\\\n self.channelBot.mention,\\\n amongUsRoleMessage.jump_url))\n await amongUsRoleMessage.edit(content=amongUsRoleMessageText)\n if amongUsRoleMessage.pinned:\n dlog.serverInfo(ctx, '`@{}` instructional message already pinned.'.format(cfg.cAmongUsRoleName))\n else:\n await self.info(ctx, 'Pinning `@{}` instructional message'.format(cfg.cAmongUsRoleName))\n await amongUsRoleMessage.pin(\\\n reason=\"The `@{}` instructional message needs to be very visible to 
be useful\".format(cfg.kBotName))\n\n await self.info(ctx, \"{} started.\".format(__name__))\n\n async def Command(self, ctx, cmd, *args):\n dlog.debug(ctx, \"Processing command: `{} {}`\".format(cmd, \" \".join(args)))\n\n # Parse the arguments as tagged members\n members = []\n memberNames = []\n resolved = []\n if cmd in [ cfg.cCommandJoin, cfg.cCommandLeave]:\n if len(args) > 0:\n userIDs = {}\n userNames = []\n for arg in args:\n if arg.startswith('<@') & arg.endswith('>'):\n userID = arg[2:-1]\n while userID.startswith('!') | userID.startswith('&'):\n userID = userID[1:len(userID)]\n userIDs[userID] = arg\n else:\n userNames.append(arg)\n for userID in userIDs:\n try:\n member = await ctx.guild.fetch_member(userID)\n except Exception as e:\n dlog.serverWarn(ctx, \"userID `{}`: {}\".format(userID, str(e)))\n except:\n dlog.serverWarn(ctx, \"userID `{}`: {}\".format(userID, str(sys.exc_info()[0])))\n else:\n if member.name not in memberNames:\n resolved.append(userIDs[userID])\n members.append(member)\n memberNames.append(member.name)\n else:\n member = await ctx.guild.fetch_member(ctx.author.id)\n members = [member]\n memberNames = [member.name]\n missing = set(args) - set(resolved)\n if (len(missing) > 0):\n await dlog.warn(self, ctx, \"Could not find `{}` members: `{}`!\".format(ctx.guild.name, \"`, `\".join(missing)))\n\n if cmd == cfg.cCommandJoin:\n await self.AddAmongUsPlayer(ctx, members)\n elif cmd == cfg.cCommandLeave:\n await self.RemoveAmongUsPlayer(ctx, members)\n elif cmd == cfg.cCommandNewGame:\n await self.NotifyAmongUsGame(ctx, ctx.message.channel, args[0])\n else:\n await Error.dErr(self, ctx, \"Invalid command `{}`.\".format(cmd))\n\n async def AddAmongUsPlayer(self, ctx, members):\n alreadyMemberNames = []\n for member in members:\n if self.roleAmongUs in member.roles:\n alreadyMemberNames.append(member.name)\n else:\n await self.info(ctx, \"Adding `@{}` to the `@{}` players\".format(member.name, self.roleAmongUs.name))\n await member.add_roles(\\\n self.roleAmongUs,\\\n reason=\"{} requested for {} to be pinged regarding Among Us games\".format(\\\n ctx.author.name,\\\n member.name))\n await self.channelBot.send(\\\n content=\"Hey `@{}` players! {} is now among the Among Us players!\".format(\\\n self.roleAmongUs.name,\\\n member.mention))\n if not member.bot:\n await member.send(\\\n content=\"You have been added to `{}`'s Among Us players. 
Type `{}` in any public channel in `{}` to \" +\n \"leave the Among Us players.\".format(\\\n ctx.guild.name,\\\n cfg.cAmongUsLeaveRequestMessageText,\\\n ctx.guild.name))\n if (len(alreadyMemberNames) > 0):\n await dlog.warn(self, ctx, \"`@{}` {} already among the `@{}` players\".format(\\\n \"`, `@\".join(alreadyMemberNames),\\\n \"is\" if len(alreadyMemberNames) == 1 else \"are\",\\\n self.roleAmongUs.name))\n\n async def RemoveAmongUsPlayer(self, ctx, members):\n missingMemberNames = []\n for member in members:\n if self.roleAmongUs in member.roles:\n await self.info(ctx, \"Removing `@{}` from the `@{}` players\".format(member.name, self.roleAmongUs.name))\n await member.remove_roles(\\\n self.roleAmongUs,\\\n reason=\"{} requested for {} to no longer receive pings regarding Among Us games\".format(\\\n ctx.author.name,\\\n member.name))\n await self.channelBot.send(content=\"{} is now Among The Hidden.\".format(member.mention))\n if not member.bot:\n await member.send(content=\"You have been remove from `{}`'s Among Us players.\".format(ctx.guild.name))\n else:\n missingMemberNames.append(member.name)\n if (len(missingMemberNames) > 0):\n await self.warn(ctx, \"`@{}` isn't among the `@{}` players\".format(\\\n \"`, `@\".join(missingMemberNames),\\\n self.roleAmongUs.name))\n\n async def NotifyAmongUsGame(self, ctx, channel, code):\n match = re.compile(r'^([A-Za-z]{6})$').search(code)\n if not match:\n await self.errMinor(ctx, \"Bad room code `{}`. Must be six letters.\".format(code))\n code = code.upper()\n await self.info(ctx, \"Notifying `@{}` of Among Us game code `{}` in `#{}`\".format(self.roleAmongUs.name, code, channel.name))\n await channel.send(\\\n content=\"Attention {}! New game code: `{}`. Type `{}` if you no longer want receive these notifications. 
{}\".format(\\\n self.roleAmongUs.mention,\\\n code,\\\n cfg.cAmongUsLeaveRequestMessageText,\\\n cfg.cAmongUsSendGameNotificationText))\n # codeSpelled = re.sub(r\"([A-Z])\", r\"\\1 \", code)\n # await channel.send(content=\"New game code: `{}`.\".format(codeSpelled),\\\n # tts=True)\n\n async def info(self, ctx, msg):\n await dlog.info(self, ctx, msg)\n\n async def warn(self, ctx, msg):\n await dlog.warn(self, ctx, msg)\n\n async def err(self, ctx, msg):\n await Error.dErr(self, ctx, msg)\n\n async def errMinor(self, ctx, msg):\n await Error.errMinor(self, ctx, msg)\n\n", "sub_path": "src/GuildBot.py", "file_name": "GuildBot.py", "file_ext": "py", "file_size_in_byte": 16519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "DiscordFacsimilies.ContextStubbed", "line_number": 25, "usage_type": "call"}, {"api_name": "DiscordFacsimilies.AuthorStubbed", "line_number": 25, "usage_type": "call"}, {"api_name": "LoggingDiscord.debug", "line_number": 27, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverError", "line_number": 38, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 42, "usage_type": "call"}, {"api_name": "re.search", "line_number": 53, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverError", "line_number": 60, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 66, "usage_type": "call"}, {"api_name": "Config.cfg.cBotChannelName", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 70, "usage_type": "name"}, {"api_name": "Config.cfg.cLogChannelName", "line_number": 72, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 72, "usage_type": "name"}, {"api_name": "LoggingDiscord.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "LoggingDiscord.debug", "line_number": 85, "usage_type": "call"}, {"api_name": "Config.cfg.cLogChannelName", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 85, "usage_type": "name"}, {"api_name": "discord.PermissionOverwrite", "line_number": 87, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 88, "usage_type": "call"}, {"api_name": "Config.cfg.cLogChannelName", "line_number": 94, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 94, "usage_type": "name"}, {"api_name": "Config.cfg.cRoleModPrefix.lower", "line_number": 111, "usage_type": "call"}, {"api_name": "Config.cfg.cRoleModPrefix", "line_number": 111, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 111, "usage_type": "name"}, {"api_name": "Config.cfg.cRoleModPrefix.lower", "line_number": 113, "usage_type": "call"}, {"api_name": "Config.cfg.cRoleModPrefix", "line_number": 113, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 113, "usage_type": "name"}, {"api_name": "Config.cfg.cRoleModSubstring.lower", "line_number": 114, "usage_type": "call"}, {"api_name": "Config.cfg.cRoleModSubstring", "line_number": 114, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 114, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName.lower", "line_number": 116, "usage_type": "call"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 116, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 116, "usage_type": "name"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 122, "usage_type": "call"}, {"api_name": 
"LoggingDiscord.serverWarn", "line_number": 125, "usage_type": "call"}, {"api_name": "Config.cfg.cRoleModPrefix", "line_number": 125, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 125, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 129, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 129, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 131, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 131, "usage_type": "name"}, {"api_name": "Config.cfg.cInstructionalLine", "line_number": 147, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 147, "usage_type": "name"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 149, "usage_type": "call"}, {"api_name": "Config.cfg.cReleaseNotes", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 153, "usage_type": "name"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 155, "usage_type": "call"}, {"api_name": "LoggingDiscord.debug", "line_number": 162, "usage_type": "call"}, {"api_name": "Config.cfg.cBotChannelName", "line_number": 162, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 162, "usage_type": "name"}, {"api_name": "discord.PermissionOverwrite", "line_number": 164, "usage_type": "call"}, {"api_name": "discord.PermissionOverwrite", "line_number": 165, "usage_type": "call"}, {"api_name": "Config.cfg.cBotChannelName", "line_number": 171, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 171, "usage_type": "name"}, {"api_name": "Config.cfg.cExternalChanges", "line_number": 184, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 184, "usage_type": "name"}, {"api_name": "Config.cfg.cExternalChanges", "line_number": 187, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 187, "usage_type": "name"}, {"api_name": "re.search", "line_number": 196, "usage_type": "call"}, {"api_name": "Config.cfg.cReleaseNotes", "line_number": 214, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 214, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 217, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 217, "usage_type": "name"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 221, "usage_type": "call"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 237, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 237, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 239, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 239, "usage_type": "name"}, {"api_name": "Config.cfg.kBotName", "line_number": 241, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 241, "usage_type": "name"}, {"api_name": "Config.cfg.cInstructionalLine", "line_number": 251, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 251, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsJoinRequestMessageText", "line_number": 252, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 252, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsLeaveRequestMessageText", "line_number": 253, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 253, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsSendGameNotificationText", "line_number": 254, "usage_type": "attribute"}, {"api_name": "Config.cfg", 
"line_number": 254, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 255, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 255, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 259, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 259, "usage_type": "name"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 263, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverInfo", "line_number": 274, "usage_type": "call"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 274, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 274, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsRoleName", "line_number": 276, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 276, "usage_type": "name"}, {"api_name": "Config.cfg.kBotName", "line_number": 278, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 278, "usage_type": "name"}, {"api_name": "LoggingDiscord.debug", "line_number": 283, "usage_type": "call"}, {"api_name": "Config.cfg.cCommandJoin", "line_number": 289, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 289, "usage_type": "name"}, {"api_name": "Config.cfg.cCommandLeave", "line_number": 289, "usage_type": "attribute"}, {"api_name": "LoggingDiscord.serverWarn", "line_number": 305, "usage_type": "call"}, {"api_name": "LoggingDiscord.serverWarn", "line_number": 307, "usage_type": "call"}, {"api_name": "LoggingDiscord.warn", "line_number": 319, "usage_type": "call"}, {"api_name": "Config.cfg.cCommandJoin", "line_number": 321, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 321, "usage_type": "name"}, {"api_name": "Config.cfg.cCommandLeave", "line_number": 323, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 323, "usage_type": "name"}, {"api_name": "Config.cfg.cCommandNewGame", "line_number": 325, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 325, "usage_type": "name"}, {"api_name": "Error.dErr", "line_number": 328, "usage_type": "call"}, {"api_name": "Config.cfg.cAmongUsLeaveRequestMessageText", "line_number": 351, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 351, "usage_type": "name"}, {"api_name": "LoggingDiscord.warn", "line_number": 354, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 380, "usage_type": "call"}, {"api_name": "Config.cfg.cAmongUsLeaveRequestMessageText", "line_number": 389, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 389, "usage_type": "name"}, {"api_name": "Config.cfg.cAmongUsSendGameNotificationText", "line_number": 390, "usage_type": "attribute"}, {"api_name": "Config.cfg", "line_number": 390, "usage_type": "name"}, {"api_name": "LoggingDiscord.info", "line_number": 396, "usage_type": "call"}, {"api_name": "LoggingDiscord.warn", "line_number": 399, "usage_type": "call"}, {"api_name": "Error.dErr", "line_number": 402, "usage_type": "call"}, {"api_name": "Error.errMinor", "line_number": 405, "usage_type": "call"}]} +{"seq_id": "7412977", "text": "\"\"\"\nSource:\nhttps://stackoverflow.com/questions/34959455/detect-parking-lot-by-opencv\n\"\"\"\n\nfrom __future__ import division \nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom cv2 import line\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom networkx.algorithms import swap\nfrom numpy import mat\nfrom skimage.exposure import exposure\nimport numpy as 
np\nimport imutils\nfrom numpy.core.defchararray import rindex\nimport sys\n\ndef line(p1, p2):\n A = (p1[1] - p2[1])\n B = (p2[0] - p1[0])\n C = (p1[0]*p2[1] - p2[0]*p1[1])\n return A, B, -C\n\ndef intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x,y\n else:\n return False\n\ndef comupteIntersect(hline,vline):\n hx1=hline[0];hy1=hline[1];hx2=hline[2];hy2=hline[3];\n vx3=vline[0];vy3=vline[1];vx4=vline[2];vy4=vline[3];\n\n\n return 0;\n\ninput = sys.argv[1]\n\n# CascadeClassifier class to detect objects. cas1.xml will have the trained data\nface_cascade = cv2.CascadeClassifier(sys.argv[2])\n\nprint(sys.argv[1],sys.argv[2])\n# im will have the input in image format\nim = cv2.imread(input)\nim2=im\n\ncv2.waitKey(0)\n\n# cvtColor Converts an image from one color space to another.\ngray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n# apply diverse linear filters to smooth images using GaussianBlur \nblur = cv2.GaussianBlur(gray,(5,15),0)\n# apply segmentation \n# Application example: Separate out regions of an image corresponding to objects which we want to analyze. This separation is based on the variation of intensity between the object pixels and the background pixels.\n# To differentiate the pixels we are interested in from the rest (which will eventually be rejected), we perform a comparison of each pixel intensity value with respect to a threshold (determined according to the problem to solve).\n# Once we have separated properly the important pixels, we can set them with a determined value to identify them (i.e. we can assign them a value of 0 (black), 255 (white) or any value that suits your needs).\n\nret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n# Contours can be explained simply as a curve joining all the continuous points (along the boundary), having same color or intensity. The contours are a useful tool for shape analysis and object detection and recognition.\n# \n# For better accuracy, use binary images. So before finding contours, apply threshold or canny edge detection.\n# findContours function modifies the source image. So if you want source image even after finding contours, already store it to some other variables.\n# In OpenCV, finding contours is like finding white object from black background. 
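As a standalone illustration of the line() and intersection() helpers defined above: line() encodes the infinite line through two points as the coefficients (A, B, C) of A*x + B*y = C, and intersection() solves the 2x2 system for two such lines with Cramer's rule, where a zero determinant D means the lines are parallel. The coordinates below are arbitrary sample values, not taken from the parking-lot image.

def line(p1, p2):
    # Coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2
    A = p1[1] - p2[1]
    B = p2[0] - p1[0]
    C = p1[0] * p2[1] - p2[0] * p1[1]
    return A, B, -C

def intersection(L1, L2):
    # Cramer's rule; D == 0 means the two lines are parallel
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    return (Dx / D, Dy / D) if D != 0 else False

horizontal = line((0, 1), (4, 1))   # the line y = 1
vertical = line((2, 0), (2, 3))     # the line x = 2
print(intersection(horizontal, vertical))   # -> (2.0, 1.0)

The script presumably applies the same idea to the horizontal and vertical Hough lines it collects further down, since the crossing points of those lines are what outline the parking grid.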
So remember, object to be found should be white and background should be black.\n_, contours, hierarchy = cv2.findContours(th3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n# by here skeleton would have been drawn\n\n#to draw the contour in the image enable the below line\n#img = cv2.drawContours(im, contours, -1, (0,255,0), 1)\nidx =0\ncrop_rect= 0\nfor cnt in contours:\n x,y,w,h = cv2.boundingRect(cnt)\n print('x, y, w, h', x, y, w, h)\n if w-x>900 and h-y>100: \n roi=im[y:y+h,x:x+w]\n crop_rect=im[y:y+h,x:x+w]\n #cv2.imshow('crop_rect',crop_rect)\n #cv2.waitKey(0)\n idx+=1\n cv2.imwrite('crp_contour'+str(idx) + '.jpg', crop_rect)\n \n\"\"\"if crop_rect == 0:\n im4=im\n im3=im\n\n gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray,(5,15),0)\n ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n _, contours, hierarchy = cv2.findContours(th3,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\n rect=None\n\"\"\"\n#else: \nim4=crop_rect\nim3=crop_rect\n \ngray=cv2.cvtColor(crop_rect,cv2.COLOR_BGR2GRAY)\nblur = cv2.GaussianBlur(gray,(5,15),0)\nret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n_, contours, hierarchy = cv2.findContours(th3,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\nrect=None\n\nfor cnt in contours:\n x1=[]\n y1=[]\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n x1.append(box[0][0]);\n x1.append(box[1][0]);\n x1.append(box[2][0]);\n x1.append(box[3][0]);\n y1.append(box[0][1]);\n y1.append(box[1][1]);\n y1.append(box[2][1]);\n y1.append(box[3][1]);\n x=np.amin(x1)\n y=np.amin(y1)\n w=np.amax(x1)\n h=np.amax(y1)\n# re = cv2.rectangle([box])\n# x,y,w,h = cv2.boundingRect(cnt)\n if w-x>900 and h-y>100:\n rect = cv2.minAreaRect(cnt)\n box = cv2.cv.BoxPoints(rect)\n box = np.int0(box)\n x,y,w,h = cv2.boundingRect(cnt)\n# crop_rect1=crop_rect[y:y+h,x:x+w]\n# cv2.imshow('crop_rect',crop_rect1)\n# cv2.waitKey(0)\n break\n\n#( top-left corner(x,y), (width, height), angle of rotation )\nx=rect[0][0]\ny=rect[0][1]\nw=rect[1][0]\nh=rect[1][1]\nangle=rect[2]\nif rect[2]<-45:\n angle += 90.0;\n temp=w\n w=h\n h=temp\n\ncenter=(x+w)/2,(y+h)/2\n\nif crop_rect == 0:\n img = im.copy()\n rot_mat = cv2.getRotationMatrix2D(center, angle, 1);\n dst=cv2.warpAffine(im,rot_mat, (int(w),int(h)));\n # cv2.imshow('Rotated and Cropped Image',dst)\n # cv2.waitKey(0)\nelse:\n img=crop_rect.copy()\n rot_mat = cv2.getRotationMatrix2D(center, angle, 1);\n dst=cv2.warpAffine(crop_rect,rot_mat, (int(w),int(h)));\n # cv2.imshow('Rotated and Cropped Image',dst)\n # cv2.waitKey(0)\n\n\nhorizontal = []\n\nim6=dst\nim4=im6\nim3=im6 \n\ngray=cv2.cvtColor(im6,cv2.COLOR_BGR2GRAY)\nedges = cv2.Canny(gray,50,150,apertureSize = 3)\n# cv2.imshow('edges Image',edges)\n# cv2.waitKey(0)\n\n# Find the edge of the image\n# lines = cv2.HoughLines(edges,1,np.pi/95,40)\nlines = cv2.HoughLines(edges,1,np.pi/180,40)\nfor rho,theta in lines[0]:\n pt1 = []\n im5=im6 \n if (thetanp.pi/180*88):\n #if (rho==592.0):\n if (rho==78.0): \n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n pt1.append(x1)\n pt1.append(y1)\n pt1.append(x2)\n pt1.append(y2)\n horizontal.append(pt1)\n cv2.line(im5,(x1,y1),(x2,y2),(0,0,255),2)\n# cv2.imshow('for',im5)\n# cv2.waitKey(0)\n break\n#\n\ndiff = h-y\n\ntoty1 = diff+y1+20.0\ntoty2 = diff+y2+20.0\n\n#cv2.line(im5,(int(x1),int(toty1)),(int(x2),int(toty2)),(0,0,255),2)\npt1 = 
[]\npt1.append(int(x1))\npt1.append(int(toty1))\npt1.append(int(x2))\npt1.append(int(toty2))\nhorizontal.append(pt1)\n\nminLineLength = 50\nmaxLineGap = 10\nim7=im3\ngray = cv2.cvtColor(im5, cv2.COLOR_BGR2GRAY)\ngray = cv2.bilateralFilter(gray, 11, 17, 17)\nedged = cv2.Canny(gray, 30, 200)\nm,n = gray.shape\nL=[]\nlines = cv2.HoughLines(edged, 2, np.pi/180,10,0,0)[0]\n# or theta>np.pi/180*80 and thetanp.pi/180*170 or thetanp.pi/180*170)\n if(theta>np.pi/180*170 or theta str:\n ''' Looks for translation of 'text' in translation.json file\n\n Parameters\n ----------\n text : str\n English string to lookup in translation.json file\n\n Returns\n -------\n str\n mapped string to 'text' in translation.json file\n '''\n # Get translations from translations.json file\n # and save it to translations dictionary\n translations = getTranslations()\n # Return mapped string to 'text' in the translations dictionary\n try:\n # Return translation if it exists\n # or return the given string if there's no translation!\n return translations[text] if translations[text] else text\n except KeyError: # This exception means there's no match for given string\n # So We'll return the given text\n return text\n\n\nclass ConfigForm(FlaskForm): # Config page form\n title = StringField(\n 'title',\n validators=[\n DataRequired(message=tr('Title is Required.')),\n Length(min=1,\n max=64,\n message=tr('Title must be between '+ \\\n '1 and 64 characters long.'))\n ],\n render_kw={'maxlength': 64})\n desc = StringField('desc',\n validators=[DataRequired(\n message=tr('Description is Required.')),\n Length(min=1, max=256,\n message=tr('Description must be between '+ \\\n '1 and 256 characters long.'))],\n render_kw={'maxlength': 256})\n dispname = StringField('dispname',\n validators=[DataRequired(\n message=tr('Display Name is Required.')),\n Length(min=1, max=32,\n message=tr('Display Name must be '+ \\\n 'between 1 and 32 characters long.'))],\n render_kw={'maxlength': 32})\n mailaddr = EmailField(\n 'mailaddr',\n validators=[DataRequired(message=tr('EMail is Required.')),\n Email(message=tr('Please check the email format.')),\n Length(min=3, max=254, message=tr('EMail must be '+ \\\n 'between 3 and 254 characters long.'))],\n render_kw={'maxlength': 254})\n dtformat = StringField('dtformat',\n validators=[DataRequired(\n message=tr('Date/Time Format is Required.')),\n Length(min=2, max=32,\n message=tr('Date/Time Format must be '+ \\\n 'between 2 and 32 characters long.'))],\n render_kw={'maxlength': 32})\n calendar = SelectField(\n 'calendar',\n validators=[\n DataRequired(message=tr('Calendar Type Setting is Required.'))\n ],\n choices=[('Gregorian', tr('Gregorian')), ('Jalali', tr('Jalali'))])\n autoapproval = SelectField(\n 'autoapproval',\n validators=[\n DataRequired(message=tr('AutoApproval Setting is Required.'))\n ],\n choices=[('Yes', tr('Yes')), ('No', tr('No'))])\n disablecomments = SelectField(\n 'disablecomments',\n validators=[\n DataRequired(message=tr('Disabling Comments Setting is Required.'))\n ],\n choices=[('Yes', tr('Yes')), ('No', tr('No'))])\n currpwd = PasswordField(\n 'currpwd',\n validators=[\n InputRequired(message=tr('Current Password Required.')),\n Length(min=5, max=128, message=tr('Current Password must be '+ \\\n 'between 5 and 128 characters long.'))\n ],\n render_kw={\n 'minlength': 5,\n 'maxlength': 128\n })\n newpwd = PasswordField(\n 'newpwd',\n validators=[Optional(),\n Length(min=8, max=128,\n message=tr('New Password must be '+ \\\n 'between 8 and 128 characters long.')),\n 
EqualTo('confirmpwd',\n message=tr('New passwords do not match.'))],\n id='pwd1',\n render_kw={\n 'minlength': 8,\n 'maxlength': 128\n })\n confirmpwd = PasswordField('confirmpwd',\n validators=[Optional(),\n Length(min=8, max=128)],\n id='pwd2',\n render_kw={\n 'minlength': 8,\n 'maxlength': 128\n })\n ppp = IntegerField(\n 'ppp',\n validators=[InputRequired(tr('Posts Per Page Setting Required.')),\n NumberRange(min=1, max=9999999999999999,\n message=tr('Posts Per Page Setting must be '+ \\\n 'between 1 and 16 digits long.'))],\n render_kw={'maxlength': 16})\n\n\nclass CommentForm(FlaskForm): # Comment page form\n name = StringField('name',\n validators=[DataRequired(\n message=tr('Name is Required.')),\n Length(min=1, max=24,\n message=tr('Name must be between '+ \\\n '1 and 24 characters long.'))],\n render_kw={'maxlength': 24})\n mailaddr = EmailField(\n 'mailaddr',\n validators=[Optional(), Email(\n message=tr('Please check the email format.')),\n Length(min=3, max=40,\n message=tr('EMail must be between '+ \\\n '1 and 40 characters long.'))],\n render_kw={\n 'minlength': 3,\n 'maxlength': 40\n })\n website = URLField('website',\n validators=[\n Optional(),\n URL(message=tr('Please check the url format.')),\n Length(min=3, max=40)\n ],\n render_kw={\n 'minlength': 3,\n 'maxlength': 40\n })\n content = StringField('content',\n validators=[DataRequired(\n message=tr('Content is Required.')),\n Length(min=1, max=256,\n message=tr('Content must be between '+ \\\n '1 and 255 characters long.'))],\n render_kw={'maxlength': 255})\n postid = IntegerField(\n 'postid',\n validators=[InputRequired(),\n NumberRange(min=1, max=9999999999999999)],\n widget=HiddenInput())\n\n\nclass PostForm(FlaskForm): # Post page form\n category = SelectField('category', coerce=int, validators=[DataRequired()])\n title = StringField('title',\n validators=[DataRequired(\n message=tr('Title is Required.')),\n Length(min=1, max=32,\n message=tr('Title must be between '+ \\\n '1 and 32 characters long.'))],\n render_kw={'maxlength': 32})\n content = TextAreaField('content',\n validators=[\n DataRequired(message=tr('Content is Required.')),\n Length(min=1, max=65536,\n message=tr('Content must be between '+ \\\n '1 and 65536 characters long.'))\n ],\n render_kw={\n 'rows': 5,\n 'maxlength': 65536\n })\n mediaaddr = URLField(\n 'mediaaddr',\n validators=[\n Optional(),\n URL(message=tr('Please check the url format.')),\n Length(min=1, max=256,\n message=tr('URL must be between '+ \\\n '1 and 256 characters long.'))\n ],\n render_kw={'maxlength': 256})\n disablecomments = SelectField(\n 'disablecomments',\n validators=[\n DataRequired(message=tr('Disabling Comments Setting is Required.'))\n ],\n choices=[('No', tr('No')), ('Yes', tr('Yes'))])\n pinned = SelectField(\n 'pinned',\n validators=[DataRequired(message=tr('Pin Setting is Required.'))],\n choices=[('No', tr('No')), ('Yes', tr('Yes'))])\n postid = IntegerField(\n 'postid',\n validators=[Optional(),\n NumberRange(min=1, max=9999999999999999)],\n widget=HiddenInput())\n\n\nclass LoginForm(FlaskForm): # Login form\n pwd = PasswordField('pwd',\n validators=[\n InputRequired(message=tr('Password Required.')),\n Length(min=5,\n max=128,\n message=tr('Invalid Password!'))\n ],\n id='pwInput',\n render_kw={\n 'minlength': 5,\n 'maxlength': 128\n })\n\n\n# This function handles the installation process\n# and creating the config file and the database\ndef install():\n '''\n Calls db.create_all() to create the database and its tables\n And creates a basic config and return 
it as result\n\n Returns\n -------\n dictionary\n a Dictionary which contains the default config\n '''\n # Create the database file and its tables\n db.create_all()\n # Create a category (to prevent errors!)\n if dbcategory.query.count() == 0:\n category = dbcategory(tr('Other'), 0)\n db.session.add(category)\n # Save changes to the database\n db.session.commit()\n # Set the admin logged_in status to True\n # and grant the user admin privileges\n session['logged_in'] = True\n # Create a new config\n newconfig = {}\n # Assign default values to our configuration\n newconfig['title'] = ''\n newconfig['desc'] = ''\n newconfig['dispname'] = tr('Admin')\n newconfig['mailaddr'] = ''\n newconfig['ppp'] = 10\n newconfig['dtformat'] = '%Y %B %d'\n newconfig['calendar'] = 'Jalali'\n newconfig['autoapproval'] = 'No'\n newconfig['disablecomments'] = 'No'\n # Save the default password (md5 hash of 'admin') in our new config\n newpwd = hashlib.md5('admin'.encode('utf-8'))\n newconfig['pwd'] = newpwd.hexdigest()\n # Create a config file using our new config\n saveConfig(newconfig)\n # Give user admin's password!\n flash(tr('Password') + ' :\\n\\nadmin')\n # Return this new config object\n # so we can use it to fill the config page fields\n return newconfig\n\n\n# This function will save config dict in config file\n# and updates the global config object in the memory\ndef saveConfig(conf: [str, str]):\n '''\n Saves 'conf' in the config file and updates \n the global config object in the memory \n\n Parameters\n ----------\n conf : [str, str]\n a dictionary which contains the config\n '''\n # Update the global config object values to new config values\n global cfg\n cfg = conf\n # Open config file for output and erase its data\n with open(CONFIG_FILE, 'w') as configFile:\n # Save new config\n json.dump(cfg, configFile, indent=4, sort_keys=True)\n\n\n# This function will return a copy of the global config object\n# or load the config file to memory as the the config object\n# if the global config object is empty and has no values\n# or if forceReload is set to True\ndef getConfig(forceReload: bool = False) -> [str, str]:\n '''\n Returns a copy of the global config object or load \n the config file to memory as the the config object\n if the global config object is empty and has no values\n or forceReload is set to 'True'\n \n Parameters\n ----------\n forceReload : bool\n a boolean which may set to True or False\n default value is False and it means that\n if the global config object is not empty\n then return the global config object but if\n it's empty then load it form the config file\n if forceReload is set to True then it will\n load the config file to memory as the global \n config object no matter if it's already \n loaded in the memory or not\n\n Returns\n -------\n [str, str]\n a copy of the global config object\n '''\n # Use the global config object\n global cfg\n # Load config file to the memory as config object\n # if it's not loaded yet\n if not any(cfg) or forceReload:\n # Check if config file exists\n # (if application is already installed and configured)\n try:\n with open(CONFIG_FILE, 'r') as configFile:\n cfg = json.load(configFile)\n except FileNotFoundError: # This exception means that\n # the program is not installed and configured yet!\n # So we'll call install() to make the config and\n # database files and redirect user to config page\n cfg = install()\n # Return the config object\n return cfg\n\n\n# This function replaces all hashtags in 'rawText' with linked hashtags\n# 'url' must 
only contain domain name and script path\n# (send request.script_root as its value!)\ndef prcText(rawText: str, url: str) -> str:\n '''\n Replaces all hashtags in the 'rawText' with linked hashtags \n (Adds html tag to all hashtags in the 'rawText' \n and links them to their page!) for example calling \n prcText('hello #dear user!', 'https://www.site.com/blog') \n will return the following string :\n \"hello \n #dear\n user!\"\n\n Parameters\n ----------\n rawText : str\n The raw string (usually post content which is stored in database) \n which may contain some hashtags\n url : str\n Address of our script including domain name\n (for example : https://www.site.com/blog)\n Send request.script_root as its value\n if you don't know how to use it\n\n Returns\n -------\n str\n a string containing 'rawText' content but hashtags \n are replaced with linked () hashtags!\n '''\n # Find all hashtags using regex\n hashTags = re.findall(r\"#(\\w+)\", rawText)\n # Replace each hashtag with a link to that hashtag\n for hashTag in set(hashTags):\n rawText = rawText.replace(\n '#' + hashTag, \"#\" + hashTag + \"\")\n\n # Replace new lines with html
<br> tag!\n rawText = rawText.replace('\\n', '<br>
')\n # Return the produced string to appear on the requested page\n return Markup(rawText)\n\n\n# This function will format date/time\n@cache.memoize()\ndef formatDateTime(strDateTime: str, strFormat: str) -> str:\n '''\n Formats the 'strDateTime' using the 'strFormat' value\n Also converts the gregorian Date/Time to jalali Date/Time \n\n Parameters\n ----------\n strDateTime : str\n a string which must contain a Date/Time \n in '%Y-%m-%d %H:%M:%S' format\n strFormat : str\n a string which must contain a format string \n like '%Y-%m-%d %H:%M:%S'\n\n Returns\n -------\n str\n a string which contains a date/time equal to \n 'strDateTime' but formatted like 'strFormat'\n '''\n # Get settings in order to check if Jalali Calendar is enabled or not later\n config = getConfig()\n # This is where we keep the result!\n result = ''\n # Names of the days of the week\n days = {\n 0: tr('Monday'),\n 1: tr('Tuesday'),\n 2: tr('Wednesday'),\n 3: tr('Thursday'),\n 4: tr('Friday'),\n 5: tr('Saturday'),\n 6: tr('Sunday')\n }\n # Convert strDateTime to a date/time object\n gdt = datetime.datetime.strptime(strDateTime, '%Y-%m-%d %H:%M:%S')\n jdt = jdatetime.GregorianToJalali(gdt.year, gdt.month, gdt.day)\n # If Jalali Calendar is enabled!\n if config['calendar'] == 'Jalali':\n # We'll use the Jalali Calendar\n # Jalali months\n jmonths = {\n 1: tr('Farvardin'),\n 2: tr('Ordibehesht'),\n 3: tr('Khordad'),\n 4: tr('Tir'),\n 5: tr('Mordad'),\n 6: tr('Shahrivar'),\n 7: tr('Mehr'),\n 8: tr('Aban'),\n 9: tr('Azar'),\n 10: tr('Dey'),\n 11: tr('Bahman'),\n 12: tr('Esfand')\n }\n result = strFormat.replace('%Y', str(jdt.jyear))\n result = result.replace('%m', str(jdt.jmonth))\n result = result.replace('%B', jmonths[jdt.jmonth])\n result = result.replace('%d', str(jdt.jday))\n # If Jalali Calendar is disabled\n elif config['calendar'] == 'Gregorian':\n # We'll use the Gregorian Calendar\n # Gregorian months\n gmonths = {\n 1: tr('January'),\n 2: tr('February'),\n 3: tr('March'),\n 4: tr('April'),\n 5: tr('May'),\n 6: tr('June'),\n 7: tr('July'),\n 8: tr('August'),\n 9: tr('September'),\n 10: tr('October'),\n 11: tr('November'),\n 12: tr('December')\n }\n result = strFormat.replace('%Y', str(gdt.year))\n result = result.replace('%m', str(gdt.month))\n result = result.replace('%B', gmonths[gdt.month])\n result = result.replace('%d', str(gdt.day))\n # End If\n result = result.replace('%A', days[gdt.weekday()])\n result = result.replace('%H', str(gdt.hour))\n result = result.replace('%M', str(gdt.minute))\n result = result.replace('%S', str(gdt.second))\n result = result.replace('%N', '')\n # Return formatted date/time string\n return result\n\n\n# After deleting or editing a post we'll call this function\n# to delete or reduce the frequncy of removed hashtags\ndef deleteTag(hashTag: str):\n '''\n Checks a hashtag's frequency in the database\n And performs the following tasks:\n If it's greater than 1 then decrease it by 1\n Else if it's less or equal to 1 then remove the hashtag from the database\n\n Parameters\n ----------\n hashtag : str\n a string which must contain only a hashtag without # \n (for example : 'blog')\n '''\n # Find the hashtag in database using its name\n tag = dbtag.query.filter(dbtag.keyword == hashTag)\n # Check if no hashtag is found\n if tag.count() == 0:\n # Return if no hashtag is found!\n return\n # If we found a hashtag\n else:\n # Reduce hashtag frequncy if it's more than 1 or delete the hashtag\n # if it's not being used in any post (frequncy <= 1)\n if tag.first().frequency > 1:\n # Reduce 
the hashtag frequncy because it's used in another post\n tag.first().frequency = tag.first().frequency - 1\n else:\n # Delete the hashtag from the database\n # because it's not used in any post\n tag.delete()\n # Save changes to the database\n db.session.commit()\n\n\n# We'll use this decorator before any function\n# that requires to check user privileges\ndef authentication_required(func):\n '''\n A decorator which is used before any function \n that requires to check user privileges and \n check if user has admin privileges or not! \n if user doesn't have admin privileges then \n we'll continue serving them as a user and not admin\n Use this decorator before any function \n that requires to check session['logged_in'] value.\n '''\n @functools.wraps(func)\n def authenticate(*args, **kwargs):\n # If user didn't login yet then\n # we'll save (logged_in = False) for his session!\n if not 'logged_in' in session:\n session['logged_in'] = False\n return func(*args, **kwargs)\n\n return authenticate\n\n\n# We'll use this decorator before any function\n# that requires admin privilages to check if user is admin or not\ndef login_required(func):\n '''\n A decorator which is used before any function \n that requires admin privileges to get executed!\n if user doesn't have admin privileges then \n we'll stop serving them and show them 403 error page\n instead of executing the requested function!\n '''\n @functools.wraps(func)\n @authentication_required\n def checkPrivileges(*args, **kwargs):\n # If 'logged_in' is False then user has no admin privileges\n if session['logged_in'] == False:\n # Render error page 403 and return error code 403 'Forbidden'\n return render_template('403.html'), 403\n return func(*args, **kwargs)\n\n return checkPrivileges\n\n\n# Add some headers to prevent some attacks\n# and log the events\n@app.after_request\ndef after_request(response):\n '''\n Add some headers to prevent some attacks and log the events\n '''\n # Add some headers\n response.headers['X-Content-Type-Options'] = 'nosniff'\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n response.headers['X-XSS-Protection'] = '1; mode=block'\n # Log the request and response\n logger.info('[' + request.remote_addr + '] ' + \\\n '<' + request.method + '>' + \\\n request.url + ' - ' + response.status)\n return response\n\n\n# This function will run after each request\n# We'll log each request and errors (if there's any error) here\n@app.teardown_request\ndef logErrors(error=None):\n '''\n Log the errors into the log file\n '''\n if error: # If there's any error\n # Log the request and error\n logger.error('[' + request.remote_addr + '] ' + \\\n '<' + request.method + '>' + \\\n request.url + ' - ' + repr(error))\n\n\n# 400 error page\n@app.errorhandler(ValidationError)\n@app.errorhandler(CSRFError)\n@app.errorhandler(400)\ndef error400(e):\n '''\n Renders our custom 400 error page and \n returns error code 400 'Bad Request' to the client\n '''\n return render_template('400.html', errormsg=str(e)), 400\n\n\n# 404 error page\n@app.errorhandler(404)\ndef error404(e):\n '''\n Renders our custom 404 error page and \n returns error code 404 'Not Found' to the client\n '''\n return render_template('404.html'), 404\n\n\n# This function will generate the required data to create the sidebar\n@authentication_required\ndef sidebar():\n '''\n Generates the required data to create the sidebar\n '''\n # Get configuration\n config = getConfig()\n # Find all categories and save it to 'categories' array\n categories = 
dbcategory.query.all()\n # We'll show 4 most popular hashtags (favtags)\n # and 4 most used hashtags (frqtags)\n # Find 4 most popular hashtags and save it to 'favtags' array\n favtags = dbtag.query.order_by(dbtag.popularity.desc()).limit(4).all()\n # Find 4 most used hashtags and save it to 'frqtags' array\n frqtags = dbtag.query.order_by(dbtag.frequency.desc()).limit(4).all()\n # Find all links and save it to 'links' array\n links = dblink.query.order_by(dblink.order).all()\n # Render the page with the provided data!\n # Create login form\n form = LoginForm()\n # Create the an object and fill it with the generated data\n items = {}\n items['config'] = config\n items['categories'] = categories\n items['favtags'] = favtags\n items['frqtags'] = frqtags\n items['links'] = links\n items['loginform'] = form\n items['admin'] = session['logged_in']\n # Return the sidebar object\n return items\n\n\n# This function handles our main page\n@app.route(\"/\")\ndef index():\n '''\n Renders the main page or Calls install() if blog is not configured yet\n It also handles increasing the hashtags popularity \n if user clicks on a specific hashtag and requests its page\n '''\n # Check if the program is installed and configured yet!\n # if it's not installed and configured yet then\n # we'll call install() to generate the default config\n # and make database file then we'll call saveConfig()\n # to make the config file and redirect user to the config page\n if not os.path.isfile(CONFIG_FILE):\n saveConfig(install())\n return redirect(url_for('config'))\n # Get the config values\n conf = getConfig()\n # If someone looks for a specific hashtag\n # we'll increase its popularity by 1\n # Get the hashtag from the request\n tag = request.args.get('tag', default='', type=str)\n # Find the hashtag in database\n t = dbtag.query.filter(dbtag.keyword == tag).first()\n # If it's not a bad request and hashtag exists in the database\n if t is not None:\n # Increase its popularity by 1\n t.popularity = t.popularity + 1\n # Save changes to the database\n db.session.commit()\n # Render the main page\n return render_template(\"index.html\",\n sidebar=sidebar(),\n config=conf,\n admin=session['logged_in'])\n\n\n# This function sends the posts to the client\n@app.route(\"/page\", methods=['GET'])\n@limiter.limit(\"60/second\")\n@authentication_required\ndef page():\n '''\n Finds the posts which is requested by user and generates the requested page \n '''\n # Get data from the request\n pageNum = request.args.get('page', default=2, type=int)\n search = request.args.get('search', default='', type=str)\n category = request.args.get('category', default=-1, type=int)\n sort = request.args.get('sort', default='descdate', type=str)\n tag = request.args.get('tag', default='', type=str)\n # We'll use this object to execute database queries\n # and find the posts which user requested!\n query = dbpost.query\n # Handle the requested arguments\n if category > -1: # Find all posts in a specific category\n query = query.filter(dbpost.category == category)\n if search != '': # Find all posts that contain search string\n query = query.filter(\n or_(dbpost.content.contains(search),\n dbpost.title.contains(search)))\n if tag != '':\n # Find all posts that contain a specific hashtag\n # (We'll put a # before the tag\n # because it's not included in request string! 
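A minimal, framework-free sketch of the hashtag convention used throughout this app: tags are written inline as #word, extracted with the regex #(\w+) (as prcText() and the post editor do), and the ?tag=... filter above simply checks whether a post's content contains '#' + tag, re-adding the '#' that the query string omits. The sample posts below are made-up data, not rows from the application's database.

import re

posts = [
    {'postid': 1, 'content': 'Hello #python and #flask fans'},
    {'postid': 2, 'content': 'No tags here'},
]

# Extract the distinct hashtags of each post
for p in posts:
    print(p['postid'], set(re.findall(r"#(\w+)", p['content'])))

# Emulate the ?tag=python filter from page(): re-add the '#' before matching
tag = 'python'
matching_ids = [p['postid'] for p in posts if ('#' + tag) in p['content']]
print(matching_ids)   # -> [1]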
/?tag=python)\n query = query.filter(dbpost.content.contains('#' + tag))\n # Change the order and show pinned posts first\n query = query.order_by(\n db.case((((dbpost.flags.op('&')(2)) == 2, 1), ), else_=0).desc())\n # Sort the posts as requested by user\n if sort == 'ascdate': # Sort by Date (Ascending Order)\n query = query.order_by(dbpost.postid)\n if sort == 'descdate' or sort == '': # Sort by Date (Descending Order)\n query = query.order_by(dbpost.postid.desc())\n if sort == 'asccomments': # Sort by Number of Comments (Ascending Order)\n query = query.order_by(dbpost.comments)\n if sort == 'desccomments': # Sort by Number of Comments (Descending Order)\n query = query.order_by(dbpost.comments.desc())\n # Get configuration\n config = getConfig()\n # Get ppp value from config object\n # and save it in a variable (ppp means Posts Per Page)\n ppp = config['ppp']\n # Get date/time format\n dtformat = config['dtformat']\n # Limit the results to the number of Posts Per Page\n results = query.offset(pageNum * ppp).limit(ppp)\n # Send \"END.\" if there's no more results to\n # send with status code 200 which means the request was successful\n if results.count() == 0:\n return Response(response=\"END.\", status=200, mimetype='text/html')\n # This small block of code will handle the positioning of the posts\n # (should they appear on the right side or the left side of the timeline?!)\n if (ppp % 2) == 1 and (pageNum % 2) == 1:\n c = 0\n else:\n c = 1\n # Array of our posts (results)\n posts = []\n # We'll use this loop to run the 'prcText' function on each post's content\n # and replace all hashtags in each post\n # with linked hashtags and format its date/time\n for result in results:\n post = {} # A single post (we'll assign its values below!)\n # We'll replace hashtags with linked hashtags\n # using the 'prcText' function\n # If the content length is greater than 512 characters then\n # we'll just show the first 512 characters of the post content\n # and also remove the last word in the first 512 characters\n # because it may be an incomplete word\n if (len(result.__dict__['content']) > 512):\n # Get first 512 characters of the post content\n content = result.__dict__['content'][0:512]\n # remove the last word in the first 512 characters of\n # the post content because it may be an incomplete word\n content = content[0:content.rfind(' ')]\n # Markup Hashtags\n post['content'] = prcText(content, request.script_root)\n # Add continue reading link to the end of the post content\n post['content'] = post['content'] + Markup('...

' + \\\n '' + \\\n tr('Continue Reading...') + '')\n else:\n # Markup hashtags\n post['content'] = prcText(result.__dict__['content'],\n request.script_root)\n # And format date/time using the 'formatDateTime' function\n post['datetime'] = formatDateTime(result.__dict__['gdatetime'],\n dtformat)\n post['gdatetime'] = result.__dict__['gdatetime']\n # Rest is the same without any modification!\n post['postid'] = result.__dict__['postid']\n post['title'] = result.__dict__['title']\n post['category'] = result.__dict__['category']\n post['mediaaddr'] = result.__dict__['mediaaddr']\n # Set Post Pinned Flag to True if it's set in flags\n post['pinned'] = True if (result.__dict__['flags'] & 2) == 2 else False\n # If user is not admin then we'll show them approved comments\n if (session['logged_in'] == False):\n post['comments'] = dbcomment.query.filter(\n dbcomment.pid == result.__dict__['postid']).filter(\n dbcomment.status >= 2).count()\n else: # If user is admin then we'll show them all comments\n post['comments'] = result.__dict__['comments']\n # Put this post in our results\n posts.append(post)\n # Render results\n return render_template(\"page.html\",\n posts=posts,\n c=c,\n mimetype=\"text/html\",\n admin=session['logged_in'])\n\n\n# This function handles config page and configurations\n@app.route(\"/config\", methods=['POST', 'GET'])\n@login_required\ndef config():\n '''\n Renders the config page and stores new configs in the config file\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # Get current configuration\n config = getConfig()\n # Form object which holds the request data\n form = ConfigForm(request.form)\n # If user opened the config page without requesting to change the config\n if request.method == 'GET':\n # Fill the form with current config\n form.title.default = config['title']\n form.desc.default = config['desc']\n form.dispname.default = config['dispname']\n form.mailaddr.default = config['mailaddr']\n form.ppp.default = config['ppp']\n form.dtformat.default = config['dtformat']\n form.calendar.default = config['calendar']\n form.currpwd.default = config['pwd']\n form.autoapproval = config['autoapproval']\n form.disablecomments = config['disablecomments']\n form.process(data=config)\n # Render the config page and fill it with current (old) config values\n return render_template(\"config.html\", form=form)\n # Validate the request data if there's a request to change the config\n if form.validate_on_submit():\n # We'll make a new config object\n newconfig = {}\n # And assign the user requested values to this new config object\n newconfig['title'] = form.title.data\n newconfig['desc'] = form.desc.data\n newconfig['dispname'] = form.dispname.data\n newconfig['mailaddr'] = form.mailaddr.data\n newconfig['ppp'] = form.ppp.data\n newconfig['dtformat'] = form.dtformat.data\n newconfig['calendar'] = form.calendar.data\n newconfig['autoapproval'] = form.autoapproval.data\n newconfig['disablecomments'] = form.disablecomments.data\n newpassword = form.newpwd.data\n # Hash the password entered by user\n currpwd = hashlib.md5(form.currpwd.data.encode('utf-8'))\n # Check if the current password is the same as the one entered by user\n if config['pwd'] != currpwd.hexdigest():\n # Warn user if password doesn't match!\n flash(\n tr('Error! 
You have entered the wrong password, ' +\\\n 'Please try again.'\n ))\n # And render the config page without changing the config\n return render_template(\"config.html\", form=form), 401\n # If admin requested to change the password\n if newpassword != '':\n # Hash the new password\n newpwd = hashlib.md5(newpassword.encode('utf-8'))\n # Save hash to config object\n newconfig['pwd'] = newpwd.hexdigest()\n else:\n # If admin didn't request to change the password\n # then we'll use the current password in new config\n newconfig['pwd'] = config['pwd']\n # If everything goes well, we'll save the new config to the config file\n saveConfig(newconfig)\n # Return to main page\n return redirect(url_for('index'))\n # Render the config page and fill it with newconfig values\n #return render_template(\"config.html\", form=form)\n else: # If there was any problem during request validation\n # Show error messages\n flash('\\n'.join('%s' % val\n for val in form.errors.values()) \\\n .replace('[\\'','').replace('\\']',''))\n # Render the config page and fill it with user requested values\n return render_template(\"config.html\", form=form)\n\n\n# This function handles viewing and saving comments\n@app.route(\"/comments\", methods=['POST', 'GET'])\n@authentication_required\ndef comments():\n '''\n Renders the comments page for a specific post \n and stores new comments in the database\n '''\n # Get 'postid' from the request\n postid = request.args.get('postid', default=-1, type=int)\n # Find the post which this new comment belongs to\n post = dbpost.query.filter(dbpost.postid == postid).first()\n # Check if the post exists and it's not a bad request!\n if post is None:\n # Renders our custom 400 error page and\n # returns error code 400 'Bad Request' to the client\n return render_template('400.html'), 400\n # Get configuration\n config = getConfig()\n # Get date/time format\n dtformat = config['dtformat']\n # Set autoapproval value to true if it's enabled in config\n # or user has admin privileges\n autoapproval = 2 if config['autoapproval'] == 'Yes' \\\n or session['logged_in'] == True else 0\n disablecomments = config['disablecomments']\n if disablecomments != 'Yes':\n disablecomments = 'Yes' if (post.flags & 1) == 1 else 'No'\n # Form object which holds the request data\n # We'll set postid value to hidden field\n form = CommentForm(request.form, postid=postid)\n # Validate the request data\n # and check if there's a new comment\n if form.validate_on_submit() and disablecomments != 'Yes':\n # Get the data from the request\n name = form.name.data\n mailaddr = form.mailaddr.data\n website = form.website.data\n content = form.content.data\n postid = form.postid.data\n gdatetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n status = autoapproval\n # Create a new comment with the data provided above\n comment = dbcomment(postid, content, gdatetime, name, website,\n mailaddr, status)\n # Increase the number of comments of\n # the post which this comment belongs to\n post.comments = post.comments + 1\n # Add this new comment to the database\n db.session.add(comment)\n # Save changes to database\n db.session.commit()\n # If Automatic approval is disabled and user is not admin\n if (not autoapproval) and (session['logged_in'] == False):\n # Inform the user that their comment\n # will appear after it is approved\n flash(tr(\"Thank you! 
Your comment \" + \\\n \"will appear after it is approved.\"))\n # Load all comments that belong to a specific post from the database\n results = dbcomment.query.filter(dbcomment.pid == postid).all()\n # Array of our comments (results)\n comments = []\n # We'll use this loop to run the 'formatDateTime' function\n # on each comment to format its date/time\n for result in results:\n comment = {} # A single comment (we'll assign its values below!)\n # And format date/time using the 'formatDateTime' function\n comment['datetime'] = formatDateTime(result.__dict__['gdatetime'],\n dtformat)\n # Rest is the same without any modification!\n comment['content'] = result.__dict__['content']\n comment['cmtid'] = result.__dict__['cmtid']\n comment['name'] = result.__dict__['name']\n comment['website'] = result.__dict__['website']\n comment['emailaddr'] = result.__dict__['emailaddr']\n comment['status'] = result.__dict__['status']\n # If user is admin then we'll remove comment's unseen status\n if (session['logged_in'] == True):\n result.status = result.status | 1\n # Put this comment in our results\n comments.append(comment)\n # Save changes to the database\n db.session.commit()\n # Sort Comments and show new comments first!\n comments.reverse()\n # Disable Comments if necessary\n if disablecomments != 'Yes':\n disablecomments = 'Yes' if (post.flags & 1) == 1 else 'No'\n # Show error messages if there was any error(s) during validation\n if (form.errors):\n flash('\\n'.join('%s' % val\n for val in form.errors.values()) \\\n .replace('[\\'','').replace('\\']',''))\n # Redirect to show post page if it was the referrer\n if request.referrer is not None and '/show?' in request.referrer:\n return redirect(request.referrer)\n # Render the comments page\n return render_template(\"comments.html\",\n comments=comments,\n postid=postid,\n form=form,\n disablecomments=disablecomments,\n admin=session['logged_in'])\n\n\n# This function handles removing comments\n@app.route(\"/deletecomment\", methods=['POST'])\n@login_required\ndef deletecomment():\n '''\n Removes a comment from the database\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # Check if it's not a bad request\n if 'id' in request.json:\n # Get the comment id from the request\n id = int(request.json.get('id'))\n # Find the comment by its id\n comment = dbcomment.query.filter(dbcomment.cmtid == id)\n # Check if the comment exists\n if comment.first() is None:\n return ('', 400)\n # Find the post which this comment belongs to\n post = dbpost.query.filter(\n dbpost.postid == comment.first().pid).first()\n # Reduce the number of comments of\n # the post which this comment belongs to\n post.comments = post.comments - 1\n # Delete the comment\n comment.delete()\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles approving comments\n@app.route(\"/approvecomment\", methods=['POST'])\n@login_required\ndef approvecomment():\n '''\n Approves a comment\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # Check if it's not a bad request\n if 'id' in request.json:\n # Get the comment id from the request\n id = int(request.json.get('id'))\n # Find the comment by its id\n comment = dbcomment.query.filter(dbcomment.cmtid == id).first()\n # Check if the comment exists\n if comment is None:\n return ('', 
400)\n # Change comment approval status to approved!\n comment.status = 3\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles viewing and saving comments\n@app.route(\"/commentmoderation\", methods=['GET'])\n@login_required\ndef commentmoderation():\n '''\n Renders the comment moderation page\n '''\n # Get configuration\n config = getConfig()\n # Get date/time format\n dtformat = config['dtformat']\n # Load all comments that require approval\n results = dbcomment.query.filter(dbcomment.status != 3) \\\n .order_by(dbcomment.cmtid.asc()).all()\n # Array of our comments (results)\n comments = []\n # We'll use this loop to run the 'formatDateTime' function\n # on each comment to format its date/time\n for result in results:\n comment = {} # A single comment (we'll assign its values below!)\n # And format date/time using the 'formatDateTime' function\n comment['datetime'] = formatDateTime(result.__dict__['gdatetime'],\n dtformat)\n # Rest is the same without any modification!\n comment['content'] = result.__dict__['content']\n comment['cmtid'] = result.__dict__['cmtid']\n comment['pid'] = result.__dict__['pid']\n comment['name'] = result.__dict__['name']\n comment['website'] = result.__dict__['website']\n comment['emailaddr'] = result.__dict__['emailaddr']\n comment['status'] = result.__dict__['status']\n # Mark new comments as seen\n if (session['logged_in'] == True):\n result.status = result.status | 1\n # Put this comment in our results\n comments.append(comment)\n # Sort Comments and show new comments first!\n comments.reverse()\n # Save changes to database\n db.session.commit()\n # Render the comment moderation page\n return render_template(\n \"commentmoderation.html\",\n comments=comments,\n sidebar=sidebar(),\n )\n\n\n# This function handles 'Share' page\n@app.route(\"/share\", methods=['GET'])\ndef share():\n '''\n Renders the share page\n '''\n # Check if it's not a bad request\n if 'postid' in request.args:\n # Get 'id' from the request\n id = request.args.get('postid', type=int, default=-1)\n # Find the requested post\n post = dbpost.query.filter(dbpost.postid == id).first()\n # Check if the post exists and it's not a bad request\n if not post is None:\n # We'll send the post to the client\n # so its data will appear on the 'Share' page\n return render_template(\"share.html\", post=post)\n # Render 400 error page and\n # returns error code 400 'Bad Request' to the client\n return render_template(\"400.html\"), 400\n\n\n# This function handles 'Post' page which is used for\n# saving new posts and editing existing posts in the database\n@app.route(\"/post\", methods=['POST', 'GET'])\n@login_required\ndef post():\n '''\n Renders the post page and stores new or edited posts in the database\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If there's no category then we'll make one!\n # (otherwise an error will occur!)\n if dbcategory.query.count() == 0:\n category = dbcategory(tr('Other'), 0)\n db.session.add(category)\n # Save changes to the database\n db.session.commit()\n # Get list of categories\n categories = dbcategory.query.all()\n # Get 'id' from the requested url, if it's empty we'll assign it '-1'\n id = request.args.get('id', default=-1, type=int)\n # Create an empty post we'll fill it later\n # Find the post by its id\n post = dbpost.query.filter(dbpost.postid == id).first()\n # If 
there's a post with that id\n if post is not None:\n # Get Post Flags\n disablecomments = 'Yes' if (post.flags & 1) == 1 else 'No'\n pinned = 'Yes' if (post.flags & 2) == 2 else 'No'\n # Create a form and fill it with post data\n form = PostForm(request.form,\n postid=post.postid,\n title=post.title,\n mediaaddr=post.mediaaddr,\n content=post.content,\n category=post.category,\n disablecomments=disablecomments,\n pinned=pinned)\n else:\n # Create an empty form\n form = PostForm(request.form)\n # Get list of all categories and put them in category select field\n form.category.choices = [(cat.catid, cat.name)\n for cat in dbcategory.query.all()]\n # If there's any data from user\n if form.validate_on_submit():\n # Get data from the request\n category = form.category.data\n title = form.title.data\n content = form.content.data\n mediaaddr = form.mediaaddr.data\n postid = form.postid.data\n flags = 1 if form.disablecomments.data == 'Yes' else 0\n flags = (flags | 2) if form.pinned.data == 'Yes' else (flags & 1)\n # If postid is not empty then user is editing an existing post\n if postid:\n # Find the post by its id\n post = dbpost.query.filter(dbpost.postid == int(postid)).first()\n # Find the hashtags in the post\n hashTags = re.findall(r\"#(\\w+)\", post.content)\n # Execute deleteTag for each hashtag in our old post content\n # (we'll add the hashtags that used in the new post content later!)\n for hashTag in set(hashTags):\n deleteTag(hashTag)\n # Save the data from request in the existing post\n post.category = category\n post.title = title\n post.mediaaddr = mediaaddr\n post.content = content\n post.flags = flags\n # If postid is empty then it's a new post\n # and user is not editing an existing post\n else:\n # Get the Date/Time\n gdatetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n # New posts don't have any comment when they're getting published!\n comments = 0\n # Create a new post with the provided data\n newpost = dbpost(title=title,\n content=content,\n gdatetime=gdatetime,\n comments=comments,\n category=category,\n mediaaddr=mediaaddr,\n flags=flags)\n # Save this new post to database\n db.session.add(newpost)\n # Save changes to the database\n db.session.commit()\n # Find all hashtags in the post content\n hashTags = re.findall(r\"#(\\w+)\", content)\n # Process each hashtag in the content\n for hashTag in set(hashTags):\n # Find the hashtag in the database\n tag = dbtag.query.filter(dbtag.keyword == hashTag)\n # If it's a new hashtag\n if tag.count() == 0:\n # It's the first time this hashtag appeared in a post\n # so frequency will be 1\n frequency = 1\n # Nobody clicked on the hashtag yet so popularity is unknown,\n # we'll assign 0 to the popularity for now\n popularity = 0\n # Create this new hashtag\n tag = dbtag(keyword=hashTag,\n frequency=frequency,\n popularity=popularity)\n # Save this hashtag to database\n db.session.add(tag)\n # If it's an existing hashtag\n else:\n # Find the hashtag in the database\n tag = tag.first()\n # Increase its frequency by 1\n tag.frequency = tag.frequency + 1\n # Save changes to the database\n db.session.commit()\n # Return to index and let the user see the new post\n return redirect(url_for('index'))\n # Show error messages if there was any error(s) during validation\n if (form.errors):\n flash('\\n'.join('%s' % val\n for val in form.errors.values()) \\\n .replace('[\\'','').replace('\\']',''))\n # Render the page and fill it with the available data\n return render_template(\"post.html\",\n categories=categories,\n 
form=form,\n admin=session['logged_in'])\n\n\n# This function Removes the post from the database and\n# execute the 'deleteTag' function for its hashtags and remove its comments\ndef removepost(id: int):\n '''\n Removes a single post from the database\n\n Parameters\n ----------\n id : int\n Post ID, We'll use this ID (Primary Key) \n to find the post in database \n '''\n # Find the post in the database\n post = dbpost.query.filter(dbpost.postid == id)\n # Delete all the comments that belong to this specific post\n dbcomment.query.filter(dbcomment.pid == id).delete()\n # Get post content\n content = post.first().content\n # And find all the hashtags in this content\n hashTags = re.findall(r\"#(\\w+)\", content)\n # Execute the 'deleteTag' function for\n # all the hashtags found in the content\n for hashTag in set(hashTags):\n # remove the hashtag or reduce its frequency\n deleteTag(hashTag)\n # Delete the post\n post.delete()\n # Save changes to the database\n db.session.commit()\n\n\n# This function handles requests for deleting posts\n@app.route(\"/deletepost\", methods=['POST'])\n@login_required\ndef deletepost():\n '''\n Gets Post ID from the request and calls removepost(id) \n to remove that specific post from the database \n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'id' in request.json:\n # Get postid\n id = int(request.json.get('id'))\n # Return \"Failure!\" if 'id' is wrong!\n if dbpost.query.filter(dbpost.postid == id).first() is None:\n return ('', 400)\n # Call the 'removepost' function to remove the post from the database\n removepost(id)\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles showing single posts\n@app.route(\"/show\", methods=['GET'])\n@authentication_required\ndef show():\n '''\n Renders the show page which is used to show a single post and its details!\n '''\n # Get 'id' from the requested url, if it's empty we'll assign it '-1'\n id = request.args.get('id', default=-1, type=int)\n # Find the post which user requested\n result = dbpost.query.filter(dbpost.postid == id).first()\n # Check if the requested post exists\n if result is None:\n # Render 400 error page and\n # returns error code 400 'Bad Request' to the client\n return render_template('400.html'), 400\n # Find its category\n category = dbcategory.query.filter(\n dbcategory.catid == result.category).first()\n # Get configuration\n config = getConfig()\n # Get date/time format\n dtformat = config['dtformat']\n # Create an empty post! 
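The post flags value written in post() above and read back in page() and show() is a small bit field: bit 1 means comments are disabled for that post and bit 2 means the post is pinned. A minimal sketch of that encode/decode convention, with made-up input values standing in for the real form data:

def encode_flags(disablecomments, pinned):
    # Same convention as post(): bit 1 = comments disabled, bit 2 = pinned
    flags = 1 if disablecomments == 'Yes' else 0
    flags = (flags | 2) if pinned == 'Yes' else (flags & 1)
    return flags

def decode_flags(flags):
    # Same checks as page()/show(): (flags & 1) == 1 and (flags & 2) == 2
    return {
        'disablecomments': 'Yes' if (flags & 1) == 1 else 'No',
        'pinned': 'Yes' if (flags & 2) == 2 else 'No',
    }

print(decode_flags(encode_flags('Yes', 'No')))   # comments disabled, not pinned
print(decode_flags(encode_flags('No', 'Yes')))   # comments enabled, pinned

page() relies on the same convention when it orders results with db.case on flags & 2, which is what floats pinned posts to the top of the timeline.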
We'll use it to send data to the client\n post = {}\n # Replace hashtags with linked hashtags!\n post['content'] = prcText(result.content, request.script_root)\n # Format date/time\n post['datetime'] = formatDateTime(result.gdatetime, dtformat)\n # Copy rest of the data\n post['postid'] = result.__dict__['postid']\n post['gdatetime'] = result.__dict__['gdatetime']\n post['title'] = result.__dict__['title']\n post['category'] = category.name\n post['mediaaddr'] = result.__dict__['mediaaddr']\n # If user is not admin then we'll show them approved comments\n if (session['logged_in'] == False):\n post['comments'] = dbcomment.query.filter(\n dbcomment.pid == result.__dict__['postid']).filter(\n dbcomment.status >= 2).count()\n else: # If user is admin then we'll show them all comments\n post['comments'] = result.__dict__['comments']\n # Set autoapproval value to true if it's enabled in config\n # or user has admin privileges\n autoapproval = 2 if config['autoapproval'] == 'Yes' \\\n or session['logged_in'] == True else 0\n disablecomments = config['disablecomments']\n # Form object which holds the request data\n # We'll set postid value to hidden field\n form = CommentForm(request.form, postid=id)\n # Load all comments that belong to a specific post from the database\n results = dbcomment.query.filter(dbcomment.pid == id).all()\n # Array of our comments (results)\n comments = []\n # We'll use this loop to run the 'formatDateTime' function\n # on each comment to format its date/time\n for item in results:\n comment = {} # A single comment (we'll assign its values below!)\n # And format date/time using the 'formatDateTime' function\n comment['datetime'] = formatDateTime(item.__dict__['gdatetime'],\n dtformat)\n # Rest is the same without any modification!\n comment['content'] = item.__dict__['content']\n comment['cmtid'] = item.__dict__['cmtid']\n comment['name'] = item.__dict__['name']\n comment['website'] = item.__dict__['website']\n comment['emailaddr'] = item.__dict__['emailaddr']\n comment['status'] = item.__dict__['status']\n # If user is admin then we'll remove comment's unseen status\n if (session['logged_in'] == True):\n item.status = item.status | 1\n # Put this comment in our results\n comments.append(comment)\n # Save changes to the database\n db.session.commit()\n # Sort Comments and show new comments first!\n comments.reverse()\n # Disable Comments if necessary\n if disablecomments != 'Yes':\n disablecomments = 'Yes' if (result.flags & 1) == 1 else 'No'\n # Show the post which was requested by user\n return render_template(\"show.html\",\n post=post,\n comments=comments,\n disablecomments=disablecomments,\n sidebar=sidebar(),\n form=CommentForm(postid=id),\n admin=session['logged_in'])\n\n\n# This function handles creating new categories\n@app.route(\"/newcategory\", methods=['POST'])\n@login_required\ndef newcategory():\n '''\n Creates a new category using the data sent by user\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'name' in request.json:\n # Get 'name' from request\n name = request.json.get('name')\n order = 0\n # Return \"Failure!\" if name is empty\n if name == '':\n return ('', 400)\n # Warn user if a category with the same name already exists!\n if dbcategory.query.filter(\n dbcategory.name == name).first() is not None:\n flash(\n tr(\"Error! A category with the same name '%s' \" +\\\n \"already exists. 
Please choose a new name!\"\n ) % name)\n # Return \"Success!\"\n return ('', 200)\n # Create a new category\n category = dbcategory(name, order)\n # Add this new category to the database\n db.session.add(category)\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles editing existing categories\n@app.route(\"/editcategory\", methods=['POST'])\n@login_required\ndef editcategory():\n '''\n Changes a category values to the values sent by user\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'id' in request.json and 'name' in request.json:\n # Get new category name from the request\n id = int(request.json.get('id'))\n name = request.json.get('name')\n order = 0\n # Return \"Failure!\" if 'id' is wrong!\n if dbcategory.query.filter(dbcategory.catid == id).first() is None:\n return ('', 400)\n # Return \"Failure!\" if name is empty\n if name == '':\n return ('', 400)\n # Return \"Success!\" if there's no change!\n if dbcategory.query.filter(\n dbcategory.catid == id).first().name == name:\n # Return \"Success!\"\n return ('', 200)\n # Warn user if a category with the same name already exists!\n if dbcategory.query.filter(\n dbcategory.name == name).first() is not None:\n flash(\n tr(\"Error! A category with the same name '%s' \" +\\\n \"already exists. Please choose a new name!\"\n ) % name)\n # Return \"Success!\"\n return ('', 200)\n # Find the category\n category = dbcategory.query.filter(dbcategory.catid == id).first()\n # Change the category name to the one requested by user\n category.name = name\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles removing the categories\n@app.route(\"/removecategory\", methods=['POST'])\n@login_required\ndef removecategory():\n '''\n Removes a category from the database\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'id' in request.json:\n # Get the category id from the request\n id = int(request.json.get('id'))\n # Return \"Failure!\" if 'id' is wrong!\n if dbcategory.query.filter(dbcategory.catid == id).first() is None:\n return ('', 400)\n # Find the category by its id in the database and delete it\n dbcategory.query.filter(dbcategory.catid == id).delete()\n # After deleting the category we'll delete\n # all the posts that belong to that category too\n # There's no Do..While in python\n # so we'll use an endless While(True) Loop\n while True:\n # Find the posts that belong the removed category\n post = dbpost.query.filter(dbpost.category == id).first()\n # If the post doesn't exists!\n if post is None:\n break # Break out of this endless loop\n # Call 'removepost' to delete the post\n # and its comments and hashtags\n removepost(post.postid)\n # If there's no category in database\n # we'll make one! 
(to prevent errors!)\n if dbcategory.query.count() == 0:\n category = dbcategory(tr('Other'), 0)\n db.session.add(category)\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles adding new links to the link box\n@app.route(\"/addlink\", methods=['POST'])\n@login_required\ndef addlink():\n '''\n Creates a new link using the data sent by user\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'address' in request.json \\\n and 'name' in request.json:\n # Get the data from the request\n name = request.json.get('name')\n addr = request.json.get('address')\n address = urllib.parse.unquote(addr)\n order = 0\n # Return \"Failure!\" if name or address is empty\n if name == '' or address == '':\n return ('', 400)\n # Warn user if a link with the same name already exists!\n if dblink.query.filter(dblink.name == name).first() is not None:\n flash(\n tr(\"Error! A link with the same name '%s' \" + \\\n \"already exists. Please choose a new name!\"\n ) % name)\n # Return \"Success!\"\n return ('', 200)\n # Warn user if a link with the same address already exists!\n if dblink.query.filter(dblink.address == address).first() is not None:\n flash(\n tr(\"Error! A link with the same address '%s' \" + \\\n \"already exists. Please enter a new address!\"\n ) % address)\n # Return \"Success!\"\n return ('', 200)\n # Create a new link with the data provided by user\n link = dblink(name, address, order)\n # Add this new link to the database\n db.session.add(link)\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles editing existing links\n@app.route(\"/editlink\", methods=['POST'])\n@login_required\ndef editlink():\n '''\n Changes a link values to the values sent by user\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'id' in request.json and \\\n 'name' in request.json and \\\n 'address' in request.json:\n # Get the data from the request\n id = int(request.json.get('id'))\n name = request.json.get('name')\n addr = request.json.get('address')\n address = urllib.parse.unquote(addr)\n order = 0\n # Return \"Failure!\" if 'id' is wrong!\n if dblink.query.filter(dblink.linkid == id).first() is None:\n return ('', 400)\n # Return \"Failure!\" if name or address is empty\n if name == '' or address == '':\n return ('', 400)\n # Return \"Success!\" if there's no change!\n if dblink.query.filter(dblink.linkid == id).first().name == name \\\n and dblink.query.filter(dblink.linkid == id).first() \\\n .address == address:\n # Return \"Success!\"\n return ('', 200)\n # Warn user if a different link with the same name already exists!\n if dblink.query.filter(dblink.linkid == id).first().name != name \\\n and dblink.query.filter(dblink.name == name) \\\n .first() is not None:\n flash(\n tr(\"Error! A link with the same name '%s' \" + \\\n \"already exists. 
Please choose a new name!\"\n ) % name)\n # Return \"Success!\"\n return ('', 200)\n # Warn user if a different link with the same address already exists!\n if dblink.query.filter(dblink.linkid == id) \\\n .first().address != address \\\n and dblink.query.filter(dblink.address == address) \\\n .first() is not None:\n flash(\n tr(\"Error! A link with the same address '%s' \" + \\\n \"already exists. Please enter a new address!\"\n ) % address)\n # Return \"Success!\"\n return ('', 200)\n # Find the link by its id\n link = dblink.query.filter(dblink.linkid == id)\n # If the link that we're looking for exists in the database\n if link.count() > 0:\n # Change its values to the ones requested by user\n link.first().name = name\n link.first().address = address\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles removing links\n@app.route(\"/removelink\", methods=['POST'])\n@login_required\ndef removelink():\n '''\n Removes a link from the database\n '''\n # This page requires admin privileges so we'll check if\n # it's requested by admin or not by using @login_required\n # If it's not a bad request\n if 'id' in request.json:\n # Get link's id from the request\n id = int(request.json.get('id'))\n # Return \"Failure!\" if 'id' is wrong!\n if dblink.query.filter(dblink.linkid == id).first() is None:\n return ('', 400)\n # Find the link by its id and delete it\n dblink.query.filter(dblink.linkid == id).delete()\n # Save changes to the database\n db.session.commit()\n # Return \"Success!\"\n return ('', 200)\n # Return \"Failure!\"\n return ('', 400)\n\n\n# This function handles the login process and user authentication\n# Limit the number of allowed requests to\n@app.route(\"/login\", methods=['POST'])\n@limiter.limit(\"3/minute\") # 3 per minute\n@limiter.limit(\"15/hour\") # 15 per hour\n@limiter.limit(\"45/day\") # 45 per day\ndef login():\n '''\n Gets the password sent by user and compare it \n with the password stored in the config file\n If they're the same then sets session['logged_in'] value to true \n which will grant user admin privileges. \n (the password which is stored in the config file is hashed \n using the md5 algorithm!)\n '''\n # Create login form\n form = LoginForm(request.form)\n # Validate the request\n if form.validate_on_submit():\n # Get configuration\n config = getConfig()\n # Get password from the request\n pwd = form.pwd.data\n # Hash the password\n pwd = hashlib.md5(pwd.encode('utf-8')).hexdigest()\n # If the password entered by the user is\n # the same as the one in our config file\n if config['pwd'] == pwd:\n # Login was successful and we'll set 'logged_in' to true\n # in our session, this will grant the user admin privileges!\n session['logged_in'] = True\n # If the password is wrong\n elif 'pwd' in request.form:\n # Ask user to enter the password again\n flash(\n tr('Error! 
You have entered the wrong password, ' + \\\n 'Please try again.'))\n # Return to last visited page or the main page\n return redirect(request.referrer or url_for('index'))\n\n\n# This function handles the logout process\n@app.route(\"/logout\")\ndef logout():\n '''\n Removes 'logged_in' from session which will revoke all admin privileges\n '''\n # Remove logged_in from the session\n session.pop('logged_in', None)\n # Return to the main page\n return redirect(url_for('index'))\n", "sub_path": "blog/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 76908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "flask_limiter.Limiter", "line_number": 77, "usage_type": "call"}, {"api_name": "flask_limiter.util.get_remote_address", "line_number": 79, "usage_type": "name"}, {"api_name": "flask_wtf.csrf.CSRFProtect", "line_number": 83, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 84, "usage_type": "call"}, {"api_name": "flask_caching.Cache", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 96, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 104, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 106, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 253, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 289, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 290, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 293, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 294, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 300, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 301, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 303, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 307, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 308, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 310, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.EmailField", "line_number": 314, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 316, "usage_type": "call"}, {"api_name": "wtforms.validators.Email", "line_number": 317, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 318, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 321, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 322, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 324, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 328, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 331, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 334, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 337, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 340, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 343, "usage_type": "call"}, 
{"api_name": "wtforms.PasswordField", "line_number": 346, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 349, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 350, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 357, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 359, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 360, "usage_type": "call"}, {"api_name": "wtforms.validators.EqualTo", "line_number": 363, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 370, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 371, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 372, "usage_type": "call"}, {"api_name": "wtforms.IntegerField", "line_number": 378, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 380, "usage_type": "call"}, {"api_name": "wtforms.validators.NumberRange", "line_number": 381, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 387, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 388, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 389, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 391, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.EmailField", "line_number": 395, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 397, "usage_type": "call"}, {"api_name": "wtforms.validators.Email", "line_number": 397, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 399, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.URLField", "line_number": 406, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 408, "usage_type": "call"}, {"api_name": "wtforms.validators.URL", "line_number": 409, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 410, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 416, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 417, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 419, "usage_type": "call"}, {"api_name": "wtforms.IntegerField", "line_number": 423, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 425, "usage_type": "call"}, {"api_name": "wtforms.validators.NumberRange", "line_number": 426, "usage_type": "call"}, {"api_name": "wtforms.widgets.HiddenInput", "line_number": 427, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 430, "usage_type": "name"}, {"api_name": "wtforms.SelectField", "line_number": 431, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 431, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 432, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 433, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 435, "usage_type": "call"}, {"api_name": "wtforms.TextAreaField", "line_number": 439, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 441, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 442, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.URLField", 
"line_number": 450, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 453, "usage_type": "call"}, {"api_name": "wtforms.validators.URL", "line_number": 454, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 455, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 460, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 463, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 466, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 468, "usage_type": "call"}, {"api_name": "wtforms.IntegerField", "line_number": 470, "usage_type": "call"}, {"api_name": "wtforms.validators.Optional", "line_number": 472, "usage_type": "call"}, {"api_name": "wtforms.validators.NumberRange", "line_number": 473, "usage_type": "call"}, {"api_name": "wtforms.widgets.HiddenInput", "line_number": 474, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 477, "usage_type": "name"}, {"api_name": "wtforms.PasswordField", "line_number": 478, "usage_type": "call"}, {"api_name": "wtforms.validators.InputRequired", "line_number": 480, "usage_type": "call"}, {"api_name": "wtforms.validators.Length", "line_number": 481, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 514, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 528, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 533, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 557, "usage_type": "call"}, {"api_name": "json.load", "line_number": 598, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 640, "usage_type": "call"}, {"api_name": "flask.Markup", "line_number": 650, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 690, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 690, "usage_type": "attribute"}, {"api_name": "jdatetime.GregorianToJalali", "line_number": 691, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 798, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 799, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 794, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 819, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 821, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 815, "usage_type": "call"}, {"api_name": "flask.request.remote_addr", "line_number": 839, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 839, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 840, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 840, "usage_type": "name"}, {"api_name": "flask.request.url", "line_number": 841, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 841, "usage_type": "name"}, {"api_name": "flask.request.remote_addr", "line_number": 854, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 854, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 855, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 855, "usage_type": "name"}, {"api_name": "flask.request.url", "line_number": 856, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 856, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 868, "usage_type": "call"}, 
{"api_name": "wtforms.validators.ValidationError", "line_number": 860, "usage_type": "argument"}, {"api_name": "flask_wtf.csrf.CSRFError", "line_number": 861, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 878, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 910, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 928, "usage_type": "call"}, {"api_name": "os.path", "line_number": 928, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 930, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 930, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 936, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 936, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 936, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 946, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 949, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 961, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 961, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 961, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 962, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 962, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 962, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 963, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 963, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 963, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 964, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 964, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 964, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 965, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 965, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 965, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 974, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 1005, "usage_type": "call"}, {"api_name": "flask.request.script_root", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1032, "usage_type": "name"}, {"api_name": "flask.Markup", "line_number": 1034, "usage_type": "call"}, {"api_name": "flask.request.script_root", "line_number": 1035, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1035, "usage_type": "name"}, {"api_name": "flask.request.script_root", "line_number": 1041, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1041, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1054, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1063, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1067, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1082, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1082, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1084, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1098, "usage_type": 
"call"}, {"api_name": "hashlib.md5", "line_number": 1115, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 1119, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1124, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 1128, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 1138, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1138, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 1143, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1147, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 1159, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1159, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1166, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1174, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1180, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 1190, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1190, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 1203, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1206, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1227, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1240, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 1244, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1244, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 1245, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 1245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1245, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1247, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1252, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1265, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1265, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1267, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1267, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1267, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1299, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1299, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1301, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1301, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1301, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1349, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1358, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1372, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1372, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1374, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1374, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1374, "usage_type": "name"}, {"api_name": 
"flask.render_template", "line_number": 1381, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1384, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 1407, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1407, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1407, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1417, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1417, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1427, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1427, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 1446, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1461, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1461, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 1477, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 1505, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1505, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 1508, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1512, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1515, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 1537, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1560, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1560, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1562, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1562, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1562, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1582, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1582, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1582, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1589, "usage_type": "call"}, {"api_name": "flask.request.script_root", "line_number": 1600, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1600, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1610, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1619, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1623, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1623, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1643, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1655, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1661, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1674, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1674, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1676, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1676, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1676, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1684, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1712, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1712, "usage_type": "name"}, {"api_name": "flask.request.json.get", 
"line_number": 1714, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1714, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1714, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1715, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1715, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1715, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1731, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1759, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1759, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1761, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1761, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1761, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1803, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1803, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1804, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1804, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1806, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1806, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1806, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1807, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1807, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1807, "usage_type": "name"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 1808, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 1808, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 1808, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1815, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 1823, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1851, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1851, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1852, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1852, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 1853, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1853, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1855, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1855, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1855, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1856, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1856, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1856, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1857, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1857, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1857, "usage_type": "name"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 1858, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 1858, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 1858, "usage_type": "name"}, {"api_name": 
"flask.flash", "line_number": 1876, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 1887, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1918, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1918, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 1920, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 1920, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1920, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1950, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1950, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 1958, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1964, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 1966, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1966, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 1968, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 1972, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 1972, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1972, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 1972, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 1982, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1982, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 1984, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1984, "usage_type": "call"}]} +{"seq_id": "507033264", "text": "from .utils import sqlite3, OperationalError, suggest_column_types, column_affinity\nfrom collections import namedtuple, OrderedDict\nfrom collections.abc import Mapping\nimport contextlib\nimport datetime\nimport decimal\nimport hashlib\nimport inspect\nimport itertools\nimport json\nimport os\nimport pathlib\nimport re\nfrom sqlite_fts4 import rank_bm25\nimport sys\nimport textwrap\nimport uuid\n\nSQLITE_MAX_VARS = 999\n\n_virtual_table_using_re = re.compile(\n r\"\"\"\n^ # Start of string\n\\s*CREATE\\s+VIRTUAL\\s+TABLE\\s+ # CREATE VIRTUAL TABLE\n(\n '(?P[^']*(?:''[^']*)*)' | # single quoted name\n \"(?P[^\"]*(?:\"\"[^\"]*)*)\" | # double quoted name\n `(?P[^`]+)` | # `backtick` quoted name\n \\[(?P[^\\]]+)\\] | # [...] quoted name\n (?P # SQLite non-quoted identifier\n [A-Za-z_\\u0080-\\uffff] # \\u0080-\\uffff = \"any character larger than u007f\"\n [A-Za-z_\\u0080-\\uffff0-9\\$]* # zero-or-more alphanemuric or $\n )\n)\n\\s+(IF\\s+NOT\\s+EXISTS\\s+)? # IF NOT EXISTS (optional)\nUSING\\s+(?P\\w+) # e.g. 
USING FTS5\n\"\"\",\n re.VERBOSE | re.IGNORECASE,\n)\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\nColumn = namedtuple(\n \"Column\", (\"cid\", \"name\", \"type\", \"notnull\", \"default_value\", \"is_pk\")\n)\nColumnDetails = namedtuple(\n \"ColumnDetails\",\n (\n \"table\",\n \"column\",\n \"total_rows\",\n \"num_null\",\n \"num_blank\",\n \"num_distinct\",\n \"most_common\",\n \"least_common\",\n ),\n)\nForeignKey = namedtuple(\n \"ForeignKey\", (\"table\", \"column\", \"other_table\", \"other_column\")\n)\nIndex = namedtuple(\"Index\", (\"seq\", \"name\", \"unique\", \"origin\", \"partial\", \"columns\"))\nXIndex = namedtuple(\"XIndex\", (\"name\", \"columns\"))\nXIndexColumn = namedtuple(\n \"XIndexColumn\", (\"seqno\", \"cid\", \"name\", \"desc\", \"coll\", \"key\")\n)\nTrigger = namedtuple(\"Trigger\", (\"name\", \"table\", \"sql\"))\n\n\nDEFAULT = object()\n\nCOLUMN_TYPE_MAPPING = {\n float: \"FLOAT\",\n int: \"INTEGER\",\n bool: \"INTEGER\",\n str: \"TEXT\",\n bytes.__class__: \"BLOB\",\n bytes: \"BLOB\",\n memoryview: \"BLOB\",\n datetime.datetime: \"TEXT\",\n datetime.date: \"TEXT\",\n datetime.time: \"TEXT\",\n decimal.Decimal: \"FLOAT\",\n None.__class__: \"TEXT\",\n uuid.UUID: \"TEXT\",\n # SQLite explicit types\n \"TEXT\": \"TEXT\",\n \"INTEGER\": \"INTEGER\",\n \"FLOAT\": \"FLOAT\",\n \"BLOB\": \"BLOB\",\n \"text\": \"TEXT\",\n \"integer\": \"INTEGER\",\n \"float\": \"FLOAT\",\n \"blob\": \"BLOB\",\n}\n# If numpy is available, add more types\nif np:\n COLUMN_TYPE_MAPPING.update(\n {\n np.int8: \"INTEGER\",\n np.int16: \"INTEGER\",\n np.int32: \"INTEGER\",\n np.int64: \"INTEGER\",\n np.uint8: \"INTEGER\",\n np.uint16: \"INTEGER\",\n np.uint32: \"INTEGER\",\n np.uint64: \"INTEGER\",\n np.float16: \"FLOAT\",\n np.float32: \"FLOAT\",\n np.float64: \"FLOAT\",\n }\n )\n\n# If pandas is available, add more types\nif pd:\n COLUMN_TYPE_MAPPING.update({pd.Timestamp: \"TEXT\"})\n\n\nclass AlterError(Exception):\n pass\n\n\nclass NoObviousTable(Exception):\n pass\n\n\nclass BadPrimaryKey(Exception):\n pass\n\n\nclass NotFoundError(Exception):\n pass\n\n\nclass PrimaryKeyRequired(Exception):\n pass\n\n\nclass InvalidColumns(Exception):\n pass\n\n\nclass DescIndex(str):\n pass\n\n\n_COUNTS_TABLE_CREATE_SQL = \"\"\"\nCREATE TABLE IF NOT EXISTS [{}](\n [table] TEXT PRIMARY KEY,\n count INTEGER DEFAULT 0\n);\n\"\"\".strip()\n\n\nclass Database:\n _counts_table_name = \"_counts\"\n use_counts_table = False\n\n def __init__(\n self,\n filename_or_conn=None,\n memory=False,\n recreate=False,\n recursive_triggers=True,\n tracer=None,\n use_counts_table=False,\n ):\n assert (filename_or_conn is not None and not memory) or (\n filename_or_conn is None and memory\n ), \"Either specify a filename_or_conn or pass memory=True\"\n if memory or filename_or_conn == \":memory:\":\n self.conn = sqlite3.connect(\":memory:\")\n elif isinstance(filename_or_conn, (str, pathlib.Path)):\n if recreate and os.path.exists(filename_or_conn):\n os.remove(filename_or_conn)\n self.conn = sqlite3.connect(str(filename_or_conn))\n else:\n assert not recreate, \"recreate cannot be used with connections, only paths\"\n self.conn = filename_or_conn\n self._tracer = tracer\n if recursive_triggers:\n self.execute(\"PRAGMA recursive_triggers=on;\")\n self._registered_functions = set()\n self.use_counts_table = use_counts_table\n\n @contextlib.contextmanager\n def tracer(self, tracer=None):\n prev_tracer = self._tracer\n self._tracer = tracer or 
print\n try:\n yield self\n finally:\n self._tracer = prev_tracer\n\n def __getitem__(self, table_name):\n return self.table(table_name)\n\n def __repr__(self):\n return \"\".format(self.conn)\n\n def register_function(self, fn=None, deterministic=None, replace=False):\n def register(fn):\n name = fn.__name__\n arity = len(inspect.signature(fn).parameters)\n if not replace and (name, arity) in self._registered_functions:\n return fn\n kwargs = {}\n if deterministic and sys.version_info >= (3, 8):\n kwargs[\"deterministic\"] = True\n self.conn.create_function(name, arity, fn, **kwargs)\n self._registered_functions.add((name, arity))\n return fn\n\n if fn is None:\n return register\n else:\n register(fn)\n\n def register_fts4_bm25(self):\n self.register_function(rank_bm25, deterministic=True)\n\n def attach(self, alias, filepath):\n attach_sql = \"\"\"\n ATTACH DATABASE '{}' AS [{}];\n \"\"\".format(\n str(pathlib.Path(filepath).resolve()), alias\n ).strip()\n self.execute(attach_sql)\n\n def execute(self, sql, parameters=None):\n if self._tracer:\n self._tracer(sql, parameters)\n if parameters is not None:\n return self.conn.execute(sql, parameters)\n else:\n return self.conn.execute(sql)\n\n def executescript(self, sql):\n if self._tracer:\n self._tracer(sql, None)\n return self.conn.executescript(sql)\n\n def table(self, table_name, **kwargs):\n klass = View if table_name in self.view_names() else Table\n return klass(self, table_name, **kwargs)\n\n def quote(self, value):\n # Normally we would use .execute(sql, [params]) for escaping, but\n # occasionally that isn't available - most notable when we need\n # to include a \"... DEFAULT 'value'\" in a column definition.\n return self.execute(\n # Use SQLite itself to correctly escape this string:\n \"SELECT quote(:value)\",\n {\"value\": value},\n ).fetchone()[0]\n\n def table_names(self, fts4=False, fts5=False):\n where = [\"type = 'table'\"]\n if fts4:\n where.append(\"sql like '%USING FTS4%'\")\n if fts5:\n where.append(\"sql like '%USING FTS5%'\")\n sql = \"select name from sqlite_master where {}\".format(\" AND \".join(where))\n return [r[0] for r in self.execute(sql).fetchall()]\n\n def view_names(self):\n return [\n r[0]\n for r in self.execute(\n \"select name from sqlite_master where type = 'view'\"\n ).fetchall()\n ]\n\n @property\n def tables(self):\n return [self[name] for name in self.table_names()]\n\n @property\n def views(self):\n return [self[name] for name in self.view_names()]\n\n @property\n def triggers(self):\n return [\n Trigger(*r)\n for r in self.execute(\n \"select name, tbl_name, sql from sqlite_master where type = 'trigger'\"\n ).fetchall()\n ]\n\n @property\n def triggers_dict(self):\n \"Returns {trigger_name: sql} dictionary\"\n return {trigger.name: trigger.sql for trigger in self.triggers}\n\n @property\n def schema(self):\n sqls = []\n for row in self.execute(\n \"select sql from sqlite_master where sql is not null\"\n ).fetchall():\n sql = row[0]\n if not sql.strip().endswith(\";\"):\n sql += \";\"\n sqls.append(sql)\n return \"\\n\".join(sqls)\n\n @property\n def journal_mode(self):\n return self.execute(\"PRAGMA journal_mode;\").fetchone()[0]\n\n def enable_wal(self):\n if self.journal_mode != \"wal\":\n self.execute(\"PRAGMA journal_mode=wal;\")\n\n def disable_wal(self):\n if self.journal_mode != \"delete\":\n self.execute(\"PRAGMA journal_mode=delete;\")\n\n def _ensure_counts_table(self):\n with self.conn:\n self.execute(_COUNTS_TABLE_CREATE_SQL.format(self._counts_table_name))\n\n def 
enable_counts(self):\n self._ensure_counts_table()\n for table in self.tables:\n if (\n table.virtual_table_using is None\n and table.name != self._counts_table_name\n ):\n table.enable_counts()\n self.use_counts_table = True\n\n def cached_counts(self, tables=None):\n sql = \"select [table], count from {}\".format(self._counts_table_name)\n if tables:\n sql += \" where [table] in ({})\".format(\", \".join(\"?\" for table in tables))\n try:\n return {r[0]: r[1] for r in self.execute(sql, tables).fetchall()}\n except OperationalError:\n return {}\n\n def reset_counts(self):\n tables = [table for table in self.tables if table.has_counts_triggers]\n with self.conn:\n self._ensure_counts_table()\n counts_table = self[self._counts_table_name]\n counts_table.delete_where()\n counts_table.insert_all(\n {\"table\": table.name, \"count\": table.execute_count()}\n for table in tables\n )\n\n def execute_returning_dicts(self, sql, params=None):\n cursor = self.execute(sql, params or tuple())\n keys = [d[0] for d in cursor.description]\n return [dict(zip(keys, row)) for row in cursor.fetchall()]\n\n def resolve_foreign_keys(self, name, foreign_keys):\n # foreign_keys may be a list of strcolumn names, a list of ForeignKey tuples,\n # a list of tuple-pairs or a list of tuple-triples. We want to turn\n # it into a list of ForeignKey tuples\n if all(isinstance(fk, ForeignKey) for fk in foreign_keys):\n return foreign_keys\n if all(isinstance(fk, str) for fk in foreign_keys):\n # It's a list of columns\n fks = []\n for column in foreign_keys:\n other_table = self[name].guess_foreign_table(column)\n other_column = self[name].guess_foreign_column(other_table)\n fks.append(ForeignKey(name, column, other_table, other_column))\n return fks\n assert all(\n isinstance(fk, (tuple, list)) for fk in foreign_keys\n ), \"foreign_keys= should be a list of tuples\"\n fks = []\n for tuple_or_list in foreign_keys:\n assert len(tuple_or_list) in (\n 2,\n 3,\n ), \"foreign_keys= should be a list of tuple pairs or triples\"\n if len(tuple_or_list) == 3:\n fks.append(\n ForeignKey(\n name, tuple_or_list[0], tuple_or_list[1], tuple_or_list[2]\n )\n )\n else:\n # Guess the primary key\n fks.append(\n ForeignKey(\n name,\n tuple_or_list[0],\n tuple_or_list[1],\n self[name].guess_foreign_column(tuple_or_list[1]),\n )\n )\n return fks\n\n def create_table_sql(\n self,\n name,\n columns,\n pk=None,\n foreign_keys=None,\n column_order=None,\n not_null=None,\n defaults=None,\n hash_id=None,\n extracts=None,\n ):\n foreign_keys = self.resolve_foreign_keys(name, foreign_keys or [])\n foreign_keys_by_column = {fk.column: fk for fk in foreign_keys}\n # any extracts will be treated as integer columns with a foreign key\n extracts = resolve_extracts(extracts)\n for extract_column, extract_table in extracts.items():\n if isinstance(extract_column, tuple):\n assert False\n # Ensure other table exists\n if not self[extract_table].exists():\n self.create_table(extract_table, {\"id\": int, \"value\": str}, pk=\"id\")\n columns[extract_column] = int\n foreign_keys_by_column[extract_column] = ForeignKey(\n name, extract_column, extract_table, \"id\"\n )\n # Soundness check not_null, and defaults if provided\n not_null = not_null or set()\n defaults = defaults or {}\n assert all(\n n in columns for n in not_null\n ), \"not_null set {} includes items not in columns {}\".format(\n repr(not_null), repr(set(columns.keys()))\n )\n assert all(\n n in columns for n in defaults\n ), \"defaults set {} includes items not in columns {}\".format(\n 
repr(set(defaults)), repr(set(columns.keys()))\n )\n validate_column_names(columns.keys())\n column_items = list(columns.items())\n if column_order is not None:\n column_items.sort(\n key=lambda p: column_order.index(p[0]) if p[0] in column_order else 999\n )\n if hash_id:\n column_items.insert(0, (hash_id, str))\n pk = hash_id\n # Soundness check foreign_keys point to existing tables\n for fk in foreign_keys:\n if not any(\n c for c in self[fk.other_table].columns if c.name == fk.other_column\n ):\n raise AlterError(\n \"No such column: {}.{}\".format(fk.other_table, fk.other_column)\n )\n\n column_defs = []\n # ensure pk is a tuple\n single_pk = None\n if isinstance(pk, list) and len(pk) == 1 and isinstance(pk[0], str):\n pk = pk[0]\n if isinstance(pk, str):\n single_pk = pk\n if pk not in [c[0] for c in column_items]:\n column_items.insert(0, (pk, int))\n for column_name, column_type in column_items:\n column_extras = []\n if column_name == single_pk:\n column_extras.append(\"PRIMARY KEY\")\n if column_name in not_null:\n column_extras.append(\"NOT NULL\")\n if column_name in defaults and defaults[column_name] is not None:\n column_extras.append(\n \"DEFAULT {}\".format(self.quote(defaults[column_name]))\n )\n if column_name in foreign_keys_by_column:\n column_extras.append(\n \"REFERENCES [{other_table}]([{other_column}])\".format(\n other_table=foreign_keys_by_column[column_name].other_table,\n other_column=foreign_keys_by_column[column_name].other_column,\n )\n )\n column_defs.append(\n \" [{column_name}] {column_type}{column_extras}\".format(\n column_name=column_name,\n column_type=COLUMN_TYPE_MAPPING[column_type],\n column_extras=(\" \" + \" \".join(column_extras))\n if column_extras\n else \"\",\n )\n )\n extra_pk = \"\"\n if single_pk is None and pk and len(pk) > 1:\n extra_pk = \",\\n PRIMARY KEY ({pks})\".format(\n pks=\", \".join([\"[{}]\".format(p) for p in pk])\n )\n columns_sql = \",\\n\".join(column_defs)\n sql = \"\"\"CREATE TABLE [{table}] (\n{columns_sql}{extra_pk}\n);\n \"\"\".format(\n table=name, columns_sql=columns_sql, extra_pk=extra_pk\n )\n return sql\n\n def create_table(\n self,\n name,\n columns,\n pk=None,\n foreign_keys=None,\n column_order=None,\n not_null=None,\n defaults=None,\n hash_id=None,\n extracts=None,\n ):\n sql = self.create_table_sql(\n name=name,\n columns=columns,\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n extracts=extracts,\n )\n self.execute(sql)\n return self.table(\n name,\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n )\n\n def create_view(self, name, sql, ignore=False, replace=False):\n assert not (\n ignore and replace\n ), \"Use one or the other of ignore/replace, not both\"\n create_sql = \"CREATE VIEW {name} AS {sql}\".format(name=name, sql=sql)\n if ignore or replace:\n # Does view exist already?\n if name in self.view_names():\n if ignore:\n return self\n elif replace:\n # If SQL is the same, do nothing\n if create_sql == self[name].schema:\n return self\n self[name].drop()\n self.execute(create_sql)\n return self\n\n def m2m_table_candidates(self, table, other_table):\n \"Returns potential m2m tables for arguments, based on FKs\"\n candidates = []\n tables = {table, other_table}\n for table in self.tables:\n # Does it have foreign keys to both table and other_table?\n has_fks_to = {fk.other_table for fk in table.foreign_keys}\n if has_fks_to.issuperset(tables):\n 
candidates.append(table.name)\n return candidates\n\n def add_foreign_keys(self, foreign_keys):\n # foreign_keys is a list of explicit 4-tuples\n assert all(\n len(fk) == 4 and isinstance(fk, (list, tuple)) for fk in foreign_keys\n ), \"foreign_keys must be a list of 4-tuples, (table, column, other_table, other_column)\"\n\n foreign_keys_to_create = []\n\n # Verify that all tables and columns exist\n for table, column, other_table, other_column in foreign_keys:\n if not self[table].exists():\n raise AlterError(\"No such table: {}\".format(table))\n if column not in self[table].columns_dict:\n raise AlterError(\"No such column: {} in {}\".format(column, table))\n if not self[other_table].exists():\n raise AlterError(\"No such other_table: {}\".format(other_table))\n if (\n other_column != \"rowid\"\n and other_column not in self[other_table].columns_dict\n ):\n raise AlterError(\n \"No such other_column: {} in {}\".format(other_column, other_table)\n )\n # We will silently skip foreign keys that exist already\n if not any(\n fk\n for fk in self[table].foreign_keys\n if fk.column == column\n and fk.other_table == other_table\n and fk.other_column == other_column\n ):\n foreign_keys_to_create.append(\n (table, column, other_table, other_column)\n )\n\n # Construct SQL for use with \"UPDATE sqlite_master SET sql = ? WHERE name = ?\"\n table_sql = {}\n for table, column, other_table, other_column in foreign_keys_to_create:\n old_sql = table_sql.get(table, self[table].schema)\n extra_sql = \",\\n FOREIGN KEY([{column}]) REFERENCES [{other_table}]([{other_column}])\\n\".format(\n column=column, other_table=other_table, other_column=other_column\n )\n # Stick that bit in at the very end just before the closing ')'\n last_paren = old_sql.rindex(\")\")\n new_sql = old_sql[:last_paren].strip() + extra_sql + old_sql[last_paren:]\n table_sql[table] = new_sql\n\n # And execute it all within a single transaction\n with self.conn:\n cursor = self.conn.cursor()\n schema_version = cursor.execute(\"PRAGMA schema_version\").fetchone()[0]\n cursor.execute(\"PRAGMA writable_schema = 1\")\n for table_name, new_sql in table_sql.items():\n cursor.execute(\n \"UPDATE sqlite_master SET sql = ? 
WHERE name = ?\",\n (new_sql, table_name),\n )\n cursor.execute(\"PRAGMA schema_version = %d\" % (schema_version + 1))\n cursor.execute(\"PRAGMA writable_schema = 0\")\n # Have to VACUUM outside the transaction to ensure .foreign_keys property\n # can see the newly created foreign key.\n self.vacuum()\n\n def index_foreign_keys(self):\n for table_name in self.table_names():\n table = self[table_name]\n existing_indexes = {\n i.columns[0] for i in table.indexes if len(i.columns) == 1\n }\n for fk in table.foreign_keys:\n if fk.column not in existing_indexes:\n table.create_index([fk.column])\n\n def vacuum(self):\n self.execute(\"VACUUM;\")\n\n\nclass Queryable:\n def exists(self):\n return False\n\n def __init__(self, db, name):\n self.db = db\n self.name = name\n\n def execute_count(self):\n return self.db.execute(\n \"select count(*) from [{}]\".format(self.name)\n ).fetchone()[0]\n\n @property\n def count(self):\n return self.execute_count()\n\n @property\n def rows(self):\n return self.rows_where()\n\n def rows_where(\n self,\n where=None,\n where_args=None,\n order_by=None,\n select=\"*\",\n limit=None,\n offset=None,\n ):\n if not self.exists():\n return []\n sql = \"select {} from [{}]\".format(select, self.name)\n if where is not None:\n sql += \" where \" + where\n if order_by is not None:\n sql += \" order by \" + order_by\n if limit is not None:\n sql += \" limit {}\".format(limit)\n if offset is not None:\n sql += \" offset {}\".format(offset)\n cursor = self.db.execute(sql, where_args or [])\n columns = [c[0] for c in cursor.description]\n for row in cursor:\n yield dict(zip(columns, row))\n\n def pks_and_rows_where(\n self,\n where=None,\n where_args=None,\n order_by=None,\n limit=None,\n offset=None,\n ):\n \"Like .rows_where() but returns (pk, row) pairs - pk can be a single value or tuple\"\n column_names = [column.name for column in self.columns]\n pks = [column.name for column in self.columns if column.is_pk]\n if not pks:\n column_names.insert(0, \"rowid\")\n pks = [\"rowid\"]\n select = \",\".join(\"[{}]\".format(column_name) for column_name in column_names)\n for row in self.rows_where(\n select=select,\n where=where,\n where_args=where_args,\n order_by=order_by,\n limit=limit,\n offset=offset,\n ):\n row_pk = tuple(row[pk] for pk in pks)\n if len(row_pk) == 1:\n row_pk = row_pk[0]\n yield row_pk, row\n\n @property\n def columns(self):\n if not self.exists():\n return []\n rows = self.db.execute(\"PRAGMA table_info([{}])\".format(self.name)).fetchall()\n return [Column(*row) for row in rows]\n\n @property\n def columns_dict(self):\n \"Returns {column: python-type} dictionary\"\n return {column.name: column_affinity(column.type) for column in self.columns}\n\n @property\n def schema(self):\n return self.db.execute(\n \"select sql from sqlite_master where name = ?\", (self.name,)\n ).fetchone()[0]\n\n\nclass Table(Queryable):\n last_rowid = None\n last_pk = None\n\n def __init__(\n self,\n db,\n name,\n pk=None,\n foreign_keys=None,\n column_order=None,\n not_null=None,\n defaults=None,\n batch_size=100,\n hash_id=None,\n alter=False,\n ignore=False,\n replace=False,\n extracts=None,\n conversions=None,\n columns=None,\n ):\n super().__init__(db, name)\n self._defaults = dict(\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n batch_size=batch_size,\n hash_id=hash_id,\n alter=alter,\n ignore=ignore,\n replace=replace,\n extracts=extracts,\n conversions=conversions or {},\n columns=columns,\n )\n\n def 
__repr__(self):\n return \"\".format(\n self.name,\n \" (does not exist yet)\"\n if not self.exists()\n else \" ({})\".format(\", \".join(c.name for c in self.columns)),\n )\n\n @property\n def count(self):\n if self.db.use_counts_table:\n counts = self.db.cached_counts([self.name])\n if counts:\n return next(iter(counts.values()))\n return self.execute_count()\n\n def exists(self):\n return self.name in self.db.table_names()\n\n @property\n def pks(self):\n names = [column.name for column in self.columns if column.is_pk]\n if not names:\n names = [\"rowid\"]\n return names\n\n def get(self, pk_values):\n if not isinstance(pk_values, (list, tuple)):\n pk_values = [pk_values]\n pks = self.pks\n last_pk = pk_values[0] if len(pks) == 1 else pk_values\n if len(pks) != len(pk_values):\n raise NotFoundError(\n \"Need {} primary key value{}\".format(\n len(pks), \"\" if len(pks) == 1 else \"s\"\n )\n )\n\n wheres = [\"[{}] = ?\".format(pk_name) for pk_name in pks]\n rows = self.rows_where(\" and \".join(wheres), pk_values)\n try:\n row = list(rows)[0]\n self.last_pk = last_pk\n return row\n except IndexError:\n raise NotFoundError\n\n @property\n def foreign_keys(self):\n fks = []\n for row in self.db.execute(\n \"PRAGMA foreign_key_list([{}])\".format(self.name)\n ).fetchall():\n if row is not None:\n id, seq, table_name, from_, to_, on_update, on_delete, match = row\n fks.append(\n ForeignKey(\n table=self.name,\n column=from_,\n other_table=table_name,\n other_column=to_,\n )\n )\n return fks\n\n @property\n def virtual_table_using(self):\n \"Returns type of virtual table or None if this is not a virtual table\"\n match = _virtual_table_using_re.match(self.schema)\n if match is None:\n return None\n return match.groupdict()[\"using\"].upper()\n\n @property\n def indexes(self):\n sql = 'PRAGMA index_list(\"{}\")'.format(self.name)\n indexes = []\n for row in self.db.execute_returning_dicts(sql):\n index_name = row[\"name\"]\n index_name_quoted = (\n '\"{}\"'.format(index_name)\n if not index_name.startswith('\"')\n else index_name\n )\n column_sql = \"PRAGMA index_info({})\".format(index_name_quoted)\n columns = []\n for seqno, cid, name in self.db.execute(column_sql).fetchall():\n columns.append(name)\n row[\"columns\"] = columns\n # These columns may be missing on older SQLite versions:\n for key, default in {\"origin\": \"c\", \"partial\": 0}.items():\n if key not in row:\n row[key] = default\n indexes.append(Index(**row))\n return indexes\n\n @property\n def xindexes(self):\n sql = 'PRAGMA index_list(\"{}\")'.format(self.name)\n indexes = []\n for row in self.db.execute_returning_dicts(sql):\n index_name = row[\"name\"]\n index_name_quoted = (\n '\"{}\"'.format(index_name)\n if not index_name.startswith('\"')\n else index_name\n )\n column_sql = \"PRAGMA index_xinfo({})\".format(index_name_quoted)\n index_columns = []\n for info in self.db.execute(column_sql).fetchall():\n index_columns.append(XIndexColumn(*info))\n indexes.append(XIndex(index_name, index_columns))\n return indexes\n\n @property\n def triggers(self):\n return [\n Trigger(*r)\n for r in self.db.execute(\n \"select name, tbl_name, sql from sqlite_master where type = 'trigger'\"\n \" and tbl_name = ?\",\n (self.name,),\n ).fetchall()\n ]\n\n @property\n def triggers_dict(self):\n \"Returns {trigger_name: sql} dictionary\"\n return {trigger.name: trigger.sql for trigger in self.triggers}\n\n def create(\n self,\n columns,\n pk=None,\n foreign_keys=None,\n column_order=None,\n not_null=None,\n defaults=None,\n hash_id=None,\n 
extracts=None,\n ):\n columns = {name: value for (name, value) in columns.items()}\n with self.db.conn:\n self.db.create_table(\n self.name,\n columns,\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n extracts=extracts,\n )\n return self\n\n def transform(\n self,\n *,\n types=None,\n rename=None,\n drop=None,\n pk=DEFAULT,\n not_null=None,\n defaults=None,\n drop_foreign_keys=None,\n column_order=None,\n ):\n assert self.exists(), \"Cannot transform a table that doesn't exist yet\"\n sqls = self.transform_sql(\n types=types,\n rename=rename,\n drop=drop,\n pk=pk,\n not_null=not_null,\n defaults=defaults,\n drop_foreign_keys=drop_foreign_keys,\n column_order=column_order,\n )\n pragma_foreign_keys_was_on = self.db.execute(\"PRAGMA foreign_keys\").fetchone()[\n 0\n ]\n try:\n if pragma_foreign_keys_was_on:\n self.db.execute(\"PRAGMA foreign_keys=0;\")\n with self.db.conn:\n for sql in sqls:\n self.db.execute(sql)\n # Run the foreign_key_check before we commit\n if pragma_foreign_keys_was_on:\n self.db.execute(\"PRAGMA foreign_key_check;\")\n finally:\n if pragma_foreign_keys_was_on:\n self.db.execute(\"PRAGMA foreign_keys=1;\")\n return self\n\n def transform_sql(\n self,\n *,\n types=None,\n rename=None,\n drop=None,\n pk=DEFAULT,\n not_null=None,\n defaults=None,\n drop_foreign_keys=None,\n column_order=None,\n tmp_suffix=None,\n ):\n types = types or {}\n rename = rename or {}\n drop = drop or set()\n new_table_name = \"{}_new_{}\".format(\n self.name, tmp_suffix or os.urandom(6).hex()\n )\n current_column_pairs = list(self.columns_dict.items())\n new_column_pairs = []\n copy_from_to = {column: column for column, _ in current_column_pairs}\n for name, type_ in current_column_pairs:\n type_ = types.get(name) or type_\n if name in drop:\n del [copy_from_to[name]]\n continue\n new_name = rename.get(name) or name\n new_column_pairs.append((new_name, type_))\n copy_from_to[name] = new_name\n\n sqls = []\n if pk is DEFAULT:\n pks_renamed = tuple(rename.get(p) or p for p in self.pks)\n if len(pks_renamed) == 1:\n pk = pks_renamed[0]\n else:\n pk = pks_renamed\n\n # not_null may be a set or dict, need to convert to a set\n create_table_not_null = {\n rename.get(c.name) or c.name\n for c in self.columns\n if c.notnull\n if c.name not in drop\n }\n if isinstance(not_null, dict):\n # Remove any columns with a value of False\n for key, value in not_null.items():\n # Column may have been renamed\n key = rename.get(key) or key\n if value is False and key in create_table_not_null:\n create_table_not_null.remove(key)\n else:\n create_table_not_null.add(key)\n elif isinstance(not_null, set):\n create_table_not_null.update((rename.get(k) or k) for k in not_null)\n elif not_null is None:\n pass\n else:\n assert False, \"not_null must be a dict or a set or None\"\n # defaults=\n create_table_defaults = {\n (rename.get(c.name) or c.name): c.default_value\n for c in self.columns\n if c.default_value is not None and c.name not in drop\n }\n if defaults is not None:\n create_table_defaults.update(\n {rename.get(c) or c: v for c, v in defaults.items()}\n )\n\n # foreign_keys\n create_table_foreign_keys = []\n for table, column, other_table, other_column in self.foreign_keys:\n if (drop_foreign_keys is None) or (column not in drop_foreign_keys):\n create_table_foreign_keys.append(\n (rename.get(column) or column, other_table, other_column)\n )\n\n if column_order is not None:\n column_order = [rename.get(col) or col for col in 
column_order]\n\n sqls.append(\n self.db.create_table_sql(\n new_table_name,\n dict(new_column_pairs),\n pk=pk,\n not_null=create_table_not_null,\n defaults=create_table_defaults,\n foreign_keys=create_table_foreign_keys,\n column_order=column_order,\n ).strip()\n )\n\n # Copy across data, respecting any renamed columns\n new_cols = []\n old_cols = []\n for from_, to_ in copy_from_to.items():\n old_cols.append(from_)\n new_cols.append(to_)\n copy_sql = \"INSERT INTO [{new_table}] ({new_cols})\\n SELECT {old_cols} FROM [{old_table}];\".format(\n new_table=new_table_name,\n old_table=self.name,\n old_cols=\", \".join(\"[{}]\".format(col) for col in old_cols),\n new_cols=\", \".join(\"[{}]\".format(col) for col in new_cols),\n )\n sqls.append(copy_sql)\n # Drop the old table\n sqls.append(\"DROP TABLE [{}];\".format(self.name))\n # Rename the new one\n sqls.append(\n \"ALTER TABLE [{}] RENAME TO [{}];\".format(new_table_name, self.name)\n )\n return sqls\n\n def extract(self, columns, table=None, fk_column=None, rename=None):\n rename = rename or {}\n if isinstance(columns, str):\n columns = [columns]\n if not set(columns).issubset(self.columns_dict.keys()):\n raise InvalidColumns(\n \"Invalid columns {} for table with columns {}\".format(\n columns, list(self.columns_dict.keys())\n )\n )\n table = table or \"_\".join(columns)\n first_column = columns[0]\n pks = self.pks\n lookup_table = self.db[table]\n fk_column = fk_column or \"{}_id\".format(table)\n magic_lookup_column = \"{}_{}\".format(fk_column, os.urandom(6).hex())\n\n # Populate the lookup table with all of the extracted unique values\n lookup_columns_definition = {\n (rename.get(col) or col): typ\n for col, typ in self.columns_dict.items()\n if col in columns\n }\n if lookup_table.exists():\n if not set(lookup_columns_definition.items()).issubset(\n lookup_table.columns_dict.items()\n ):\n raise InvalidColumns(\n \"Lookup table {} already exists but does not have columns {}\".format(\n table, lookup_columns_definition\n )\n )\n else:\n lookup_table.create(\n {\n **{\n \"id\": int,\n },\n **lookup_columns_definition,\n },\n pk=\"id\",\n )\n lookup_columns = [(rename.get(col) or col) for col in columns]\n lookup_table.create_index(lookup_columns, unique=True, if_not_exists=True)\n self.db.execute(\n \"INSERT OR IGNORE INTO [{lookup_table}] ({lookup_columns}) SELECT DISTINCT {table_cols} FROM [{table}]\".format(\n lookup_table=table,\n lookup_columns=\", \".join(\"[{}]\".format(c) for c in lookup_columns),\n table_cols=\", \".join(\"[{}]\".format(c) for c in columns),\n table=self.name,\n )\n )\n\n # Now add the new fk_column\n self.add_column(magic_lookup_column, int)\n\n # And populate it\n self.db.execute(\n \"UPDATE [{table}] SET [{magic_lookup_column}] = (SELECT id FROM [{lookup_table}] WHERE {where})\".format(\n table=self.name,\n magic_lookup_column=magic_lookup_column,\n lookup_table=table,\n where=\" AND \".join(\n \"[{table}].[{column}] = [{lookup_table}].[{lookup_column}]\".format(\n table=self.name,\n lookup_table=table,\n column=column,\n lookup_column=rename.get(column) or column,\n )\n for column in columns\n ),\n )\n )\n # Figure out the right column order\n column_order = []\n for c in self.columns:\n if c.name in columns and magic_lookup_column not in column_order:\n column_order.append(magic_lookup_column)\n elif c.name == magic_lookup_column:\n continue\n else:\n column_order.append(c.name)\n\n # Drop the unnecessary columns and rename lookup column\n self.transform(\n drop=set(columns),\n 
rename={magic_lookup_column: fk_column},\n column_order=column_order,\n )\n\n # And add the foreign key constraint\n self.add_foreign_key(fk_column, table, \"id\")\n return self\n\n def create_index(self, columns, index_name=None, unique=False, if_not_exists=False):\n if index_name is None:\n index_name = \"idx_{}_{}\".format(\n self.name.replace(\" \", \"_\"), \"_\".join(columns)\n )\n columns_sql = []\n for column in columns:\n if isinstance(column, DescIndex):\n fmt = \"[{}] desc\"\n else:\n fmt = \"[{}]\"\n columns_sql.append(fmt.format(column))\n sql = (\n textwrap.dedent(\n \"\"\"\n CREATE {unique}INDEX {if_not_exists}[{index_name}]\n ON [{table_name}] ({columns});\n \"\"\"\n )\n .strip()\n .format(\n index_name=index_name,\n table_name=self.name,\n columns=\", \".join(columns_sql),\n unique=\"UNIQUE \" if unique else \"\",\n if_not_exists=\"IF NOT EXISTS \" if if_not_exists else \"\",\n )\n )\n self.db.execute(sql)\n return self\n\n def add_column(\n self, col_name, col_type=None, fk=None, fk_col=None, not_null_default=None\n ):\n fk_col_type = None\n if fk is not None:\n # fk must be a valid table\n if not fk in self.db.table_names():\n raise AlterError(\"table '{}' does not exist\".format(fk))\n # if fk_col specified, must be a valid column\n if fk_col is not None:\n if fk_col not in self.db[fk].columns_dict:\n raise AlterError(\"table '{}' has no column {}\".format(fk, fk_col))\n else:\n # automatically set fk_col to first primary_key of fk table\n pks = [c for c in self.db[fk].columns if c.is_pk]\n if pks:\n fk_col = pks[0].name\n fk_col_type = pks[0].type\n else:\n fk_col = \"rowid\"\n fk_col_type = \"INTEGER\"\n if col_type is None:\n col_type = str\n not_null_sql = None\n if not_null_default is not None:\n not_null_sql = \"NOT NULL DEFAULT {}\".format(self.db.quote(not_null_default))\n sql = \"ALTER TABLE [{table}] ADD COLUMN [{col_name}] {col_type}{not_null_default};\".format(\n table=self.name,\n col_name=col_name,\n col_type=fk_col_type or COLUMN_TYPE_MAPPING[col_type],\n not_null_default=(\" \" + not_null_sql) if not_null_sql else \"\",\n )\n self.db.execute(sql)\n if fk is not None:\n self.add_foreign_key(col_name, fk, fk_col)\n return self\n\n def drop(self, ignore=False):\n try:\n self.db.execute(\"DROP TABLE [{}]\".format(self.name))\n except sqlite3.OperationalError:\n if not ignore:\n raise\n\n def guess_foreign_table(self, column):\n column = column.lower()\n possibilities = [column]\n if column.endswith(\"_id\"):\n column_without_id = column[:-3]\n possibilities.append(column_without_id)\n if not column_without_id.endswith(\"s\"):\n possibilities.append(column_without_id + \"s\")\n elif not column.endswith(\"s\"):\n possibilities.append(column + \"s\")\n existing_tables = {t.lower(): t for t in self.db.table_names()}\n for table in possibilities:\n if table in existing_tables:\n return existing_tables[table]\n # If we get here there's no obvious candidate - raise an error\n raise NoObviousTable(\n \"No obvious foreign key table for column '{}' - tried {}\".format(\n column, repr(possibilities)\n )\n )\n\n def guess_foreign_column(self, other_table):\n pks = [c for c in self.db[other_table].columns if c.is_pk]\n if len(pks) != 1:\n raise BadPrimaryKey(\n \"Could not detect single primary key for table '{}'\".format(other_table)\n )\n else:\n return pks[0].name\n\n def add_foreign_key(\n self, column, other_table=None, other_column=None, ignore=False\n ):\n # Ensure column exists\n if column not in self.columns_dict:\n raise AlterError(\"No such column: 
{}\".format(column))\n # If other_table is not specified, attempt to guess it from the column\n if other_table is None:\n other_table = self.guess_foreign_table(column)\n # If other_column is not specified, detect the primary key on other_table\n if other_column is None:\n other_column = self.guess_foreign_column(other_table)\n\n # Soundness check that the other column exists\n if (\n not [c for c in self.db[other_table].columns if c.name == other_column]\n and other_column != \"rowid\"\n ):\n raise AlterError(\"No such column: {}.{}\".format(other_table, other_column))\n # Check we do not already have an existing foreign key\n if any(\n fk\n for fk in self.foreign_keys\n if fk.column == column\n and fk.other_table == other_table\n and fk.other_column == other_column\n ):\n if ignore:\n return self\n else:\n raise AlterError(\n \"Foreign key already exists for {} => {}.{}\".format(\n column, other_table, other_column\n )\n )\n self.db.add_foreign_keys([(self.name, column, other_table, other_column)])\n return self\n\n def enable_counts(self):\n sql = (\n textwrap.dedent(\n \"\"\"\n {create_counts_table}\n CREATE TRIGGER IF NOT EXISTS [{table}{counts_table}_insert] AFTER INSERT ON [{table}]\n BEGIN\n INSERT OR REPLACE INTO [{counts_table}]\n VALUES (\n {table_quoted},\n COALESCE(\n (SELECT count FROM [{counts_table}] WHERE [table] = {table_quoted}),\n 0\n ) + 1\n );\n END;\n CREATE TRIGGER IF NOT EXISTS [{table}{counts_table}_delete] AFTER DELETE ON [{table}]\n BEGIN\n INSERT OR REPLACE INTO [{counts_table}]\n VALUES (\n {table_quoted},\n COALESCE(\n (SELECT count FROM [{counts_table}] WHERE [table] = {table_quoted}),\n 0\n ) - 1\n );\n END;\n INSERT OR REPLACE INTO _counts VALUES ({table_quoted}, (select count(*) from [{table}]));\n \"\"\"\n )\n .strip()\n .format(\n create_counts_table=_COUNTS_TABLE_CREATE_SQL.format(\n self.db._counts_table_name\n ),\n counts_table=self.db._counts_table_name,\n table=self.name,\n table_quoted=self.db.quote(self.name),\n )\n )\n with self.db.conn:\n self.db.conn.executescript(sql)\n self.db.use_counts_table = True\n\n @property\n def has_counts_triggers(self):\n trigger_names = {\n \"{table}{counts_table}_{suffix}\".format(\n counts_table=self.db._counts_table_name, table=self.name, suffix=suffix\n )\n for suffix in [\"insert\", \"delete\"]\n }\n return trigger_names.issubset(self.triggers_dict.keys())\n\n def enable_fts(\n self,\n columns,\n fts_version=\"FTS5\",\n create_triggers=False,\n tokenize=None,\n replace=False,\n ):\n \"Enables FTS on the specified columns.\"\n create_fts_sql = (\n textwrap.dedent(\n \"\"\"\n CREATE VIRTUAL TABLE [{table}_fts] USING {fts_version} (\n {columns},{tokenize}\n content=[{table}]\n )\n \"\"\"\n )\n .strip()\n .format(\n table=self.name,\n columns=\", \".join(\"[{}]\".format(c) for c in columns),\n fts_version=fts_version,\n tokenize=\"\\n tokenize='{}',\".format(tokenize) if tokenize else \"\",\n )\n )\n should_recreate = False\n if replace and self.db[\"{}_fts\".format(self.name)].exists():\n # Does the table need to be recreated?\n fts_schema = self.db[\"{}_fts\".format(self.name)].schema\n if fts_schema != create_fts_sql:\n should_recreate = True\n expected_triggers = {self.name + suffix for suffix in (\"_ai\", \"_ad\", \"_au\")}\n existing_triggers = {t.name for t in self.triggers}\n has_triggers = existing_triggers.issuperset(expected_triggers)\n if has_triggers != create_triggers:\n should_recreate = True\n if not should_recreate:\n # Table with correct configuration already exists\n return self\n\n if 
should_recreate:\n self.disable_fts()\n\n self.db.executescript(create_fts_sql)\n self.populate_fts(columns)\n\n if create_triggers:\n old_cols = \", \".join(\"old.[{}]\".format(c) for c in columns)\n new_cols = \", \".join(\"new.[{}]\".format(c) for c in columns)\n triggers = (\n textwrap.dedent(\n \"\"\"\n CREATE TRIGGER [{table}_ai] AFTER INSERT ON [{table}] BEGIN\n INSERT INTO [{table}_fts] (rowid, {columns}) VALUES (new.rowid, {new_cols});\n END;\n CREATE TRIGGER [{table}_ad] AFTER DELETE ON [{table}] BEGIN\n INSERT INTO [{table}_fts] ([{table}_fts], rowid, {columns}) VALUES('delete', old.rowid, {old_cols});\n END;\n CREATE TRIGGER [{table}_au] AFTER UPDATE ON [{table}] BEGIN\n INSERT INTO [{table}_fts] ([{table}_fts], rowid, {columns}) VALUES('delete', old.rowid, {old_cols});\n INSERT INTO [{table}_fts] (rowid, {columns}) VALUES (new.rowid, {new_cols});\n END;\n \"\"\"\n )\n .strip()\n .format(\n table=self.name,\n columns=\", \".join(\"[{}]\".format(c) for c in columns),\n old_cols=old_cols,\n new_cols=new_cols,\n )\n )\n self.db.executescript(triggers)\n return self\n\n def populate_fts(self, columns):\n sql = (\n textwrap.dedent(\n \"\"\"\n INSERT INTO [{table}_fts] (rowid, {columns})\n SELECT rowid, {columns} FROM [{table}];\n \"\"\"\n )\n .strip()\n .format(\n table=self.name, columns=\", \".join(\"[{}]\".format(c) for c in columns)\n )\n )\n self.db.executescript(sql)\n return self\n\n def disable_fts(self):\n fts_table = self.detect_fts()\n if fts_table:\n self.db[fts_table].drop()\n # Now delete the triggers that related to that table\n sql = (\n textwrap.dedent(\n \"\"\"\n SELECT name FROM sqlite_master\n WHERE type = 'trigger'\n AND sql LIKE '% INSERT INTO [{}]%'\n \"\"\"\n )\n .strip()\n .format(fts_table)\n )\n trigger_names = []\n for row in self.db.execute(sql).fetchall():\n trigger_names.append(row[0])\n with self.db.conn:\n for trigger_name in trigger_names:\n self.db.execute(\"DROP TRIGGER IF EXISTS [{}]\".format(trigger_name))\n return self\n\n def rebuild_fts(self):\n fts_table = self.detect_fts()\n if fts_table is None:\n # Assume this is itself an FTS table\n fts_table = self.name\n self.db.execute(\n \"INSERT INTO [{table}]([{table}]) VALUES('rebuild');\".format(\n table=fts_table\n )\n )\n return self\n\n def detect_fts(self):\n \"Detect if table has a corresponding FTS virtual table and return it\"\n sql = (\n textwrap.dedent(\n \"\"\"\n SELECT name FROM sqlite_master\n WHERE rootpage = 0\n AND (\n sql LIKE '%VIRTUAL TABLE%USING FTS%content=%{table}%'\n OR (\n tbl_name = \"{table}\"\n AND sql LIKE '%VIRTUAL TABLE%USING FTS%'\n )\n )\n \"\"\"\n )\n .strip()\n .format(table=self.name)\n )\n rows = self.db.execute(sql).fetchall()\n if len(rows) == 0:\n return None\n else:\n return rows[0][0]\n\n def optimize(self):\n fts_table = self.detect_fts()\n if fts_table is not None:\n self.db.execute(\n \"\"\"\n INSERT INTO [{table}] ([{table}]) VALUES (\"optimize\");\n \"\"\".strip().format(\n table=fts_table\n )\n )\n return self\n\n def search_sql(self, columns=None, order_by=None, limit=None, offset=None):\n # Pick names for table and rank column that don't clash\n original = \"original_\" if self.name == \"original\" else \"original\"\n columns_sql = \"*\"\n columns_with_prefix_sql = \"[{}].*\".format(original)\n if columns:\n columns_sql = \",\\n \".join(\"[{}]\".format(c) for c in columns)\n columns_with_prefix_sql = \",\\n \".join(\n \"[{}].[{}]\".format(original, c) for c in columns\n )\n fts_table = self.detect_fts()\n assert fts_table, \"Full-text search is not 
configured for table '{}'\".format(\n self.name\n )\n virtual_table_using = self.db[fts_table].virtual_table_using\n sql = textwrap.dedent(\n \"\"\"\n with {original} as (\n select\n rowid,\n {columns}\n from [{dbtable}]\n )\n select\n {columns_with_prefix}\n from\n [{original}]\n join [{fts_table}] on [{original}].rowid = [{fts_table}].rowid\n where\n [{fts_table}] match :query\n order by\n {order_by}\n {limit_offset}\n \"\"\"\n ).strip()\n if virtual_table_using == \"FTS5\":\n rank_implementation = \"[{}].rank\".format(fts_table)\n else:\n self.db.register_fts4_bm25()\n rank_implementation = \"rank_bm25(matchinfo([{}], 'pcnalx'))\".format(\n fts_table\n )\n limit_offset = \"\"\n if limit is not None:\n limit_offset += \" limit {}\".format(limit)\n if offset is not None:\n limit_offset += \" offset {}\".format(offset)\n return sql.format(\n dbtable=self.name,\n original=original,\n columns=columns_sql,\n columns_with_prefix=columns_with_prefix_sql,\n fts_table=fts_table,\n order_by=order_by or rank_implementation,\n limit_offset=limit_offset.strip(),\n ).strip()\n\n def search(self, q, order_by=None, columns=None, limit=None, offset=None):\n cursor = self.db.execute(\n self.search_sql(\n order_by=order_by,\n columns=columns,\n limit=limit,\n offset=offset,\n ),\n {\"query\": q},\n )\n columns = [c[0] for c in cursor.description]\n for row in cursor:\n yield dict(zip(columns, row))\n\n def value_or_default(self, key, value):\n return self._defaults[key] if value is DEFAULT else value\n\n def delete(self, pk_values):\n if not isinstance(pk_values, (list, tuple)):\n pk_values = [pk_values]\n self.get(pk_values)\n wheres = [\"[{}] = ?\".format(pk_name) for pk_name in self.pks]\n sql = \"delete from [{table}] where {wheres}\".format(\n table=self.name, wheres=\" and \".join(wheres)\n )\n with self.db.conn:\n self.db.execute(sql, pk_values)\n return self\n\n def delete_where(self, where=None, where_args=None):\n if not self.exists():\n return []\n sql = \"delete from [{}]\".format(self.name)\n if where is not None:\n sql += \" where \" + where\n self.db.execute(sql, where_args or [])\n return self\n\n def update(self, pk_values, updates=None, alter=False, conversions=None):\n updates = updates or {}\n conversions = conversions or {}\n if not isinstance(pk_values, (list, tuple)):\n pk_values = [pk_values]\n # Soundness check that the record exists (raises error if not):\n self.get(pk_values)\n if not updates:\n return self\n args = []\n sets = []\n wheres = []\n pks = self.pks\n validate_column_names(updates.keys())\n for key, value in updates.items():\n sets.append(\"[{}] = {}\".format(key, conversions.get(key, \"?\")))\n args.append(jsonify_if_needed(value))\n wheres = [\"[{}] = ?\".format(pk_name) for pk_name in pks]\n args.extend(pk_values)\n sql = \"update [{table}] set {sets} where {wheres}\".format(\n table=self.name, sets=\", \".join(sets), wheres=\" and \".join(wheres)\n )\n with self.db.conn:\n try:\n rowcount = self.db.execute(sql, args).rowcount\n except OperationalError as e:\n if alter and (\" column\" in e.args[0]):\n # Attempt to add any missing columns, then try again\n self.add_missing_columns([updates])\n rowcount = self.db.execute(sql, args).rowcount\n else:\n raise\n\n # TODO: Test this works (rolls back) - use better exception:\n assert rowcount == 1\n self.last_pk = pk_values[0] if len(pks) == 1 else pk_values\n return self\n\n def build_insert_queries_and_params(\n self,\n extracts,\n chunk,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n 
num_records_processed,\n replace,\n ignore,\n ):\n # values is the list of insert data that is passed to the\n # .execute() method - but some of them may be replaced by\n # new primary keys if we are extracting any columns.\n values = []\n extracts = resolve_extracts(extracts)\n for record in chunk:\n record_values = []\n for key in all_columns:\n value = jsonify_if_needed(\n record.get(key, None if key != hash_id else _hash(record))\n )\n if key in extracts:\n extract_table = extracts[key]\n value = self.db[extract_table].lookup({\"value\": value})\n record_values.append(value)\n values.append(record_values)\n\n queries_and_params = []\n if upsert:\n if isinstance(pk, str):\n pks = [pk]\n else:\n pks = pk\n self.last_pk = None\n for record_values in values:\n # TODO: make more efficient:\n record = dict(zip(all_columns, record_values))\n sql = \"INSERT OR IGNORE INTO [{table}]({pks}) VALUES({pk_placeholders});\".format(\n table=self.name,\n pks=\", \".join([\"[{}]\".format(p) for p in pks]),\n pk_placeholders=\", \".join([\"?\" for p in pks]),\n )\n queries_and_params.append((sql, [record[col] for col in pks]))\n # UPDATE [book] SET [name] = 'Programming' WHERE [id] = 1001;\n set_cols = [col for col in all_columns if col not in pks]\n if set_cols:\n sql2 = \"UPDATE [{table}] SET {pairs} WHERE {wheres}\".format(\n table=self.name,\n pairs=\", \".join(\n \"[{}] = {}\".format(col, conversions.get(col, \"?\"))\n for col in set_cols\n ),\n wheres=\" AND \".join(\"[{}] = ?\".format(pk) for pk in pks),\n )\n queries_and_params.append(\n (\n sql2,\n [record[col] for col in set_cols]\n + [record[pk] for pk in pks],\n )\n )\n # We can populate .last_pk right here\n if num_records_processed == 1:\n self.last_pk = tuple(record[pk] for pk in pks)\n if len(self.last_pk) == 1:\n self.last_pk = self.last_pk[0]\n\n else:\n or_what = \"\"\n if replace:\n or_what = \"OR REPLACE \"\n elif ignore:\n or_what = \"OR IGNORE \"\n sql = \"\"\"\n INSERT {or_what}INTO [{table}] ({columns}) VALUES {rows};\n \"\"\".strip().format(\n or_what=or_what,\n table=self.name,\n columns=\", \".join(\"[{}]\".format(c) for c in all_columns),\n rows=\", \".join(\n \"({placeholders})\".format(\n placeholders=\", \".join(\n [conversions.get(col, \"?\") for col in all_columns]\n )\n )\n for record in chunk\n ),\n )\n flat_values = list(itertools.chain(*values))\n queries_and_params = [(sql, flat_values)]\n\n return queries_and_params\n\n def insert_chunk(\n self,\n alter,\n extracts,\n chunk,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n num_records_processed,\n replace,\n ignore,\n ):\n queries_and_params = self.build_insert_queries_and_params(\n extracts,\n chunk,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n num_records_processed,\n replace,\n ignore,\n )\n\n with self.db.conn:\n result = None\n for query, params in queries_and_params:\n try:\n result = self.db.execute(query, params)\n except OperationalError as e:\n if alter and (\" column\" in e.args[0]):\n # Attempt to add any missing columns, then try again\n self.add_missing_columns(chunk)\n result = self.db.execute(query, params)\n elif e.args[0] == \"too many SQL variables\":\n\n first_half = chunk[: len(chunk) // 2]\n second_half = chunk[len(chunk) // 2 :]\n\n self.insert_chunk(\n alter,\n extracts,\n first_half,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n num_records_processed,\n replace,\n ignore,\n )\n\n self.insert_chunk(\n alter,\n extracts,\n second_half,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n 
num_records_processed,\n replace,\n ignore,\n )\n\n else:\n raise\n if num_records_processed == 1 and not upsert:\n self.last_rowid = result.lastrowid\n self.last_pk = self.last_rowid\n # self.last_rowid will be 0 if a \"INSERT OR IGNORE\" happened\n if (hash_id or pk) and self.last_rowid:\n row = list(self.rows_where(\"rowid = ?\", [self.last_rowid]))[0]\n if hash_id:\n self.last_pk = row[hash_id]\n elif isinstance(pk, str):\n self.last_pk = row[pk]\n else:\n self.last_pk = tuple(row[p] for p in pk)\n\n return\n\n def insert(\n self,\n record,\n pk=DEFAULT,\n foreign_keys=DEFAULT,\n column_order=DEFAULT,\n not_null=DEFAULT,\n defaults=DEFAULT,\n hash_id=DEFAULT,\n alter=DEFAULT,\n ignore=DEFAULT,\n replace=DEFAULT,\n extracts=DEFAULT,\n conversions=DEFAULT,\n columns=DEFAULT,\n ):\n return self.insert_all(\n [record],\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n alter=alter,\n ignore=ignore,\n replace=replace,\n extracts=extracts,\n conversions=conversions,\n columns=columns,\n )\n\n def insert_all(\n self,\n records,\n pk=DEFAULT,\n foreign_keys=DEFAULT,\n column_order=DEFAULT,\n not_null=DEFAULT,\n defaults=DEFAULT,\n batch_size=DEFAULT,\n hash_id=DEFAULT,\n alter=DEFAULT,\n ignore=DEFAULT,\n replace=DEFAULT,\n truncate=False,\n extracts=DEFAULT,\n conversions=DEFAULT,\n columns=DEFAULT,\n upsert=False,\n ):\n \"\"\"\n Like .insert() but takes a list of records and ensures that the table\n that it creates (if table does not exist) has columns for ALL of that\n data\n \"\"\"\n pk = self.value_or_default(\"pk\", pk)\n foreign_keys = self.value_or_default(\"foreign_keys\", foreign_keys)\n column_order = self.value_or_default(\"column_order\", column_order)\n not_null = self.value_or_default(\"not_null\", not_null)\n defaults = self.value_or_default(\"defaults\", defaults)\n batch_size = self.value_or_default(\"batch_size\", batch_size)\n hash_id = self.value_or_default(\"hash_id\", hash_id)\n alter = self.value_or_default(\"alter\", alter)\n ignore = self.value_or_default(\"ignore\", ignore)\n replace = self.value_or_default(\"replace\", replace)\n extracts = self.value_or_default(\"extracts\", extracts)\n conversions = self.value_or_default(\"conversions\", conversions)\n columns = self.value_or_default(\"columns\", columns)\n\n if upsert and (not pk and not hash_id):\n raise PrimaryKeyRequired(\"upsert() requires a pk\")\n assert not (hash_id and pk), \"Use either pk= or hash_id=\"\n if hash_id:\n pk = hash_id\n\n assert not (\n ignore and replace\n ), \"Use either ignore=True or replace=True, not both\"\n all_columns = None\n first = True\n num_records_processed = 0\n # We can only handle a max of 999 variables in a SQL insert, so\n # we need to adjust the batch_size down if we have too many cols\n records = iter(records)\n # Peek at first record to count its columns:\n try:\n first_record = next(records)\n except StopIteration:\n return self # It was an empty list\n num_columns = len(first_record.keys())\n assert (\n num_columns <= SQLITE_MAX_VARS\n ), \"Rows can have a maximum of {} columns\".format(SQLITE_MAX_VARS)\n batch_size = max(1, min(batch_size, SQLITE_MAX_VARS // num_columns))\n self.last_rowid = None\n self.last_pk = None\n if truncate and self.exists():\n self.db.execute(\"DELETE FROM [{}];\".format(self.name))\n for chunk in chunks(itertools.chain([first_record], records), batch_size):\n chunk = list(chunk)\n num_records_processed += len(chunk)\n if first:\n if not self.exists():\n # Use 
the first batch to derive the table names\n column_types = suggest_column_types(chunk)\n column_types.update(columns or {})\n self.create(\n column_types,\n pk,\n foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n extracts=extracts,\n )\n all_columns = set()\n for record in chunk:\n all_columns.update(record.keys())\n all_columns = list(sorted(all_columns))\n if hash_id:\n all_columns.insert(0, hash_id)\n else:\n for record in chunk:\n all_columns += [\n column for column in record if column not in all_columns\n ]\n\n validate_column_names(all_columns)\n first = False\n\n self.insert_chunk(\n alter,\n extracts,\n chunk,\n all_columns,\n hash_id,\n upsert,\n pk,\n conversions,\n num_records_processed,\n replace,\n ignore,\n )\n\n return self\n\n def upsert(\n self,\n record,\n pk=DEFAULT,\n foreign_keys=DEFAULT,\n column_order=DEFAULT,\n not_null=DEFAULT,\n defaults=DEFAULT,\n hash_id=DEFAULT,\n alter=DEFAULT,\n extracts=DEFAULT,\n conversions=DEFAULT,\n columns=DEFAULT,\n ):\n return self.upsert_all(\n [record],\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n hash_id=hash_id,\n alter=alter,\n extracts=extracts,\n conversions=conversions,\n columns=columns,\n )\n\n def upsert_all(\n self,\n records,\n pk=DEFAULT,\n foreign_keys=DEFAULT,\n column_order=DEFAULT,\n not_null=DEFAULT,\n defaults=DEFAULT,\n batch_size=DEFAULT,\n hash_id=DEFAULT,\n alter=DEFAULT,\n extracts=DEFAULT,\n conversions=DEFAULT,\n columns=DEFAULT,\n ):\n return self.insert_all(\n records,\n pk=pk,\n foreign_keys=foreign_keys,\n column_order=column_order,\n not_null=not_null,\n defaults=defaults,\n batch_size=batch_size,\n hash_id=hash_id,\n alter=alter,\n extracts=extracts,\n conversions=conversions,\n columns=columns,\n upsert=True,\n )\n\n def add_missing_columns(self, records):\n needed_columns = suggest_column_types(records)\n current_columns = {c.lower() for c in self.columns_dict}\n for col_name, col_type in needed_columns.items():\n if col_name.lower() not in current_columns:\n self.add_column(col_name, col_type)\n return self\n\n def lookup(self, column_values):\n # lookups is a dictionary - all columns will be used for a unique index\n assert isinstance(column_values, dict)\n if self.exists():\n self.add_missing_columns([column_values])\n unique_column_sets = [set(i.columns) for i in self.indexes]\n if set(column_values.keys()) not in unique_column_sets:\n self.create_index(column_values.keys(), unique=True)\n wheres = [\"[{}] = ?\".format(column) for column in column_values]\n rows = list(\n self.rows_where(\n \" and \".join(wheres), [value for _, value in column_values.items()]\n )\n )\n try:\n return rows[0][\"id\"]\n except IndexError:\n return self.insert(column_values, pk=\"id\").last_pk\n else:\n pk = self.insert(column_values, pk=\"id\").last_pk\n self.create_index(column_values.keys(), unique=True)\n return pk\n\n def m2m(\n self,\n other_table,\n record_or_iterable=None,\n pk=DEFAULT,\n lookup=None,\n m2m_table=None,\n alter=False,\n ):\n if isinstance(other_table, str):\n other_table = self.db.table(other_table, pk=pk)\n our_id = self.last_pk\n if lookup is not None:\n assert record_or_iterable is None, \"Provide lookup= or record, not both\"\n else:\n assert record_or_iterable is not None, \"Provide lookup= or record, not both\"\n tables = list(sorted([self.name, other_table.name]))\n columns = [\"{}_id\".format(t) for t in tables]\n if m2m_table is not None:\n m2m_table_name = 
m2m_table\n else:\n # Detect if there is a single, unambiguous option\n candidates = self.db.m2m_table_candidates(self.name, other_table.name)\n if len(candidates) == 1:\n m2m_table_name = candidates[0]\n elif len(candidates) > 1:\n raise NoObviousTable(\n \"No single obvious m2m table for {}, {} - use m2m_table= parameter\".format(\n self.name, other_table.name\n )\n )\n else:\n # If not, create a new table\n m2m_table_name = m2m_table or \"{}_{}\".format(*tables)\n m2m_table = self.db.table(m2m_table_name, pk=columns, foreign_keys=columns)\n if lookup is None:\n # if records is only one record, put the record in a list\n records = (\n [record_or_iterable]\n if isinstance(record_or_iterable, Mapping)\n else record_or_iterable\n )\n # Ensure each record exists in other table\n for record in records:\n id = other_table.insert(\n record, pk=pk, replace=True, alter=alter\n ).last_pk\n m2m_table.insert(\n {\n \"{}_id\".format(other_table.name): id,\n \"{}_id\".format(self.name): our_id,\n },\n replace=True,\n )\n else:\n id = other_table.lookup(lookup)\n m2m_table.insert(\n {\n \"{}_id\".format(other_table.name): id,\n \"{}_id\".format(self.name): our_id,\n },\n replace=True,\n )\n return self\n\n def analyze_column(\n self, column, common_limit=10, value_truncate=None, total_rows=None\n ):\n db = self.db\n table = self.name\n if total_rows is None:\n total_rows = db[table].count\n\n def truncate(value):\n if value_truncate is None or isinstance(value, (float, int)):\n return value\n value = str(value)\n if len(value) > value_truncate:\n value = value[:value_truncate] + \"...\"\n return value\n\n num_null = db.execute(\n \"select count(*) from [{}] where [{}] is null\".format(table, column)\n ).fetchone()[0]\n num_blank = db.execute(\n \"select count(*) from [{}] where [{}] = ''\".format(table, column)\n ).fetchone()[0]\n num_distinct = db.execute(\n \"select count(distinct [{}]) from [{}]\".format(column, table)\n ).fetchone()[0]\n most_common = None\n least_common = None\n if num_distinct == 1:\n value = db.execute(\n \"select [{}] from [{}] limit 1\".format(column, table)\n ).fetchone()[0]\n most_common = [(truncate(value), total_rows)]\n elif num_distinct != total_rows:\n most_common = [\n (truncate(r[0]), r[1])\n for r in db.execute(\n \"select [{}], count(*) from [{}] group by [{}] order by count(*) desc, [{}] limit {}\".format(\n column, table, column, column, common_limit\n )\n ).fetchall()\n ]\n most_common.sort(key=lambda p: (p[1], p[0]), reverse=True)\n if num_distinct <= common_limit:\n # No need to run the query if it will just return the results in revers order\n least_common = None\n else:\n least_common = [\n (truncate(r[0]), r[1])\n for r in db.execute(\n \"select [{}], count(*) from [{}] group by [{}] order by count(*), [{}] desc limit {}\".format(\n column, table, column, column, common_limit\n )\n ).fetchall()\n ]\n least_common.sort(key=lambda p: (p[1], p[0]))\n return ColumnDetails(\n self.name,\n column,\n total_rows,\n num_null,\n num_blank,\n num_distinct,\n most_common,\n least_common,\n )\n\n\nclass View(Queryable):\n def exists(self):\n return True\n\n def __repr__(self):\n return \"\".format(\n self.name, \", \".join(c.name for c in self.columns)\n )\n\n def drop(self, ignore=False):\n try:\n self.db.execute(\"DROP VIEW [{}]\".format(self.name))\n except sqlite3.OperationalError:\n if not ignore:\n raise\n\n def enable_fts(self, *args, **kwargs):\n raise NotImplementedError(\n \"enable_fts() is supported on tables but not on views\"\n )\n\n\ndef chunks(sequence, 
size):\n iterator = iter(sequence)\n for item in iterator:\n yield itertools.chain([item], itertools.islice(iterator, size - 1))\n\n\ndef jsonify_if_needed(value):\n if isinstance(value, decimal.Decimal):\n return float(value)\n if isinstance(value, (dict, list, tuple)):\n return json.dumps(value, default=repr, ensure_ascii=False)\n elif isinstance(value, (datetime.time, datetime.date, datetime.datetime)):\n return value.isoformat()\n elif isinstance(value, uuid.UUID):\n return str(value)\n else:\n return value\n\n\ndef _hash(record):\n return hashlib.sha1(\n json.dumps(record, separators=(\",\", \":\"), sort_keys=True, default=repr).encode(\n \"utf8\"\n )\n ).hexdigest()\n\n\ndef resolve_extracts(extracts):\n if extracts is None:\n extracts = {}\n if isinstance(extracts, (list, tuple)):\n extracts = {item: item for item in extracts}\n return extracts\n\n\ndef validate_column_names(columns):\n # Validate no columns contain '[' or ']' - #86\n for column in columns:\n assert (\n \"[\" not in column and \"]\" not in column\n ), \"'[' and ']' cannot be used in column names\"\n", "sub_path": "sqlite_utils/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 79196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.VERBOSE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "re.IGNORECASE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 67, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 70, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 71, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 72, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 89, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 90, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 91, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.uint64", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 124, "usage_type": "attribute"}, {"api_name": "utils.sqlite3.connect", "line_number": 180, "usage_type": "call"}, {"api_name": "utils.sqlite3", "line_number": 180, "usage_type": 
"name"}, {"api_name": "pathlib.Path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 183, "usage_type": "call"}, {"api_name": "utils.sqlite3.connect", "line_number": 184, "usage_type": "call"}, {"api_name": "utils.sqlite3", "line_number": 184, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 194, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 212, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sqlite_fts4.rank_bm25", "line_number": 228, "usage_type": "argument"}, {"api_name": "pathlib.Path", "line_number": 234, "usage_type": "call"}, {"api_name": "utils.OperationalError", "line_number": 348, "usage_type": "name"}, {"api_name": "utils.column_affinity", "line_number": 738, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 999, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 1113, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1205, "usage_type": "call"}, {"api_name": "utils.sqlite3.OperationalError", "line_number": 1263, "usage_type": "attribute"}, {"api_name": "utils.sqlite3", "line_number": 1263, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 1337, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1399, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1440, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1467, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1487, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1520, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 1569, "usage_type": "call"}, {"api_name": "utils.OperationalError", "line_number": 1674, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 1778, "usage_type": "call"}, {"api_name": "utils.OperationalError", "line_number": 1815, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 1969, "usage_type": "call"}, {"api_name": "utils.suggest_column_types", "line_number": 1975, "usage_type": "call"}, {"api_name": "utils.suggest_column_types", "line_number": 2078, "usage_type": "call"}, {"api_name": "collections.abc.Mapping", "line_number": 2147, "usage_type": "argument"}, {"api_name": "utils.sqlite3.OperationalError", "line_number": 2252, "usage_type": "attribute"}, {"api_name": "utils.sqlite3", "line_number": 2252, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 2265, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 2265, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 2269, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 2272, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 2273, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 2273, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 2273, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 2275, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 2282, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 2283, "usage_type": "call"}]} +{"seq_id": "467353072", "text": "from django.shortcuts import render, 
redirect,get_object_or_404\nfrom .models import *\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.http import JsonResponse\nfrom django.contrib.sessions.models import Session\nfrom django.db import connection\nfrom django.core import serializers\nfrom django.http import HttpResponse\nfrom django.db.models import Count\nfrom django.db.models import Sum\nfrom datetime import datetime\nfrom django.http import HttpResponseRedirect\nfrom django.conf.urls import url\n\n\ndef adminlogin(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n post = SchoolDetail.objects.filter(username=username,password=password)\n if post:\n username = request.POST['username']\n request.session['username'] = username\n a = request.session['username']\n sess = SchoolDetail.objects.only('id').get(username=a).id\n request.session['schoolname']=sess\n return redirect(\"dashboard\")\n else:\n messages.success(request, 'Invalid Username or Password')\n return render(request, 'index.html', {})\ndef teacher_login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n post = StaffDetail.objects.filter(username=username,password=password)\n if post:\n username = request.POST['username']\n request.session['teacher'] = username\n a = request.session['teacher']\n user_id = StaffDetail.objects.only('staff_id').get(username=a).staff_id\n request.session['user_id']=user_id\n sess = StaffDetail.objects.only('school_id_id').get(staff_id=user_id).school_id_id\n request.session['schoolname']=sess\n return redirect(\"teacher_dashboard\")\n else:\n messages.success(request, 'Invalid Username or Password')\n return render(request, 'teacher_login.html', {})\ndef student_login(request):\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n post = StudentDetail.objects.filter(username=username,password=password)\n if post:\n username = request.POST['username']\n request.session['student'] = username\n a=request.session['student']\n stud_id = StudentDetail.objects.only('id').get(username=a).id\n request.session['student_id']=stud_id\n stud_reg_no = StudentDetail.objects.only('register_number').get(username=a).register_number\n request.session['register_number']=stud_reg_no\n sess = StudentDetail.objects.only('school_id_id').get(username=a).school_id_id\n request.session['schoolname']=sess\n return redirect(\"student_dashboard\")\n else:\n messages.success(request, 'Invalid Username or Password')\n return render(request, 'student_login.html', {})\ndef dashboard(request):\n\tif request.session.has_key('username'):\n\t\tposts = request.session['username']\n\t\tquery = SchoolDetail.objects.filter(username=posts)\n\t\treturn render(request, 'dashboard.html', {\"query\":query})\n\telse:\n\t\treturn render(request, 'index.html',{})\ndef teacher_dashboard(request):\n\treturn render(request, 'teacher_dashboard.html',{})\ndef student_dashboard(request):\n\treturn render(request, 'student_dashboard.html',{})\ndef logout(request):\n try:\n Session.objects.all().delete()\n except:\n pass\n return render(request, 'index.html', {})\n\ndef teacher_logout(request):\n try:\n Session.objects.all().delete()\n except:\n pass\n return render(request, 'teacher_login.html', {})\n\ndef student_logout(request):\n try:\n Session.objects.all().delete()\n except:\n pass\n return render(request, 'student_login.html', {})\n\ndef menu(request):\n if 
request.session.has_key('username'):\n posts = request.session['username']\n query = SchoolDetail.objects.filter(username=posts)\n data = {\n \"user\":user\n }\n return JsonResponse(data)\ndef add_teacher(request):\n if request.method == \"POST\":\n form = StaffForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('teachers')\n else:\n form = StaffForm()\n return render(request, 'add_teacher.html', {'forms':form})\ndef teachers(request):\n school_id = request.session['schoolname']\n teacher = StaffDetail.objects.filter(school_id_id=school_id)\n return render(request, 'teachers.html', {'teacher':teacher})\ndef add_student(request):\n if request.method == \"POST\":\n form = StudentForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('students')\n else:\n form = StudentForm()\n return render(request, 'add_students.html', {'form':form})\ndef edit_teacher(request,pk):\n post = get_object_or_404(StaffDetail, pk=pk)\n if request.method == \"POST\":\n form = StaffEditForm(request.POST,instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('teachers')\n else:\n form = StaffEditForm(instance=post)\n return render(request, 'edit_teacher.html', {'form': form})\ndef teacher_delete(request, pk):\n student = StaffDetail.objects.get(staff_id=pk)\n student.delete()\n return redirect(\"teachers\")\ndef students(request):\n school_id = request.session['schoolname']\n student = StudentDetail.objects.filter(school_id_id=school_id)\n return render(request, 'students.html', {'student':student})\ndef student_edit(request,pk):\n post = get_object_or_404(StudentDetail, pk=pk)\n if request.method == \"POST\":\n form = StudentEditForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(\"students\")\n else:\n form = StudentEditForm(instance=post)\n return render(request, 'student_edit.html', {'form': form})\ndef student_delete(request, pk):\n student = StudentDetail.objects.get(id=pk)\n student.delete()\n return redirect(\"students\")\ndef add_class_section(request):\n\tif request.method == \"POST\":\n\t\tschool_id = request.POST.get('school_id')\n\t\tclass_name = request.POST.get('class_name')\n\t\tname = class_name.split(', ')\n\t\tClass.objects.bulk_create([Class(school_id=school_id,class_name=name)])\n\treturn render(request, 'add_classes_sections.html', {})\ndef classes_sections(request):\n return render(request, 'classes_sections.html', {})\ndef add_subject(request):\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n row = Subject.objects.filter(school_id=school_id).select_related('class_id').select_related('section_id').order_by('subject_id')\n if request.method == \"POST\":\n form = SubjectForm(request.POST, request.FILES)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n messages.success(request, 'Subject Added Successfully')\n form = SubjectForm()\n else:\n form = SubjectForm()\n\n return render(request, 'add_subject.html', {'row':row,'form':form})\ndef edit_subject(request,pk):\n post = get_object_or_404(Subject, pk=pk)\n if request.method == \"POST\":\n form = SubjectForm(request.POST,instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('add_subject')\n else:\n form = SubjectForm(instance=post)\n return render(request, 
'edit_subject.html', {'form': form})\ndef delete_subject(request, pk):\n subject = Subject.objects.get(subject_id=pk)\n subject.delete()\n return redirect(\"add_subject\")\ndef assign_subjects_to_teachers(request):\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n teacher = StaffDetail.objects.filter(school_id=school_id)\n class_id = Class.objects.filter(school_id=school_id,academic_year=academic_year)\n if request.method == \"POST\":\n form = AssignTeacherForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(\"assign_teachers_view\") \n else:\n form = AssignTeacherForm()\n return render(request, 'assign_subjects_to_teachers.html', {'form':form,'teacher':teacher,'class_id':class_id})\ndef edit_assign_subjects_to_teachers(request,pk):\n post = get_object_or_404(AssignSubjectTeacher, pk=pk)\n teacher = AssignSubjectTeacher.objects.filter(assign_subject_teacher_id=pk)\n if request.method == \"POST\":\n form = AssignTeacherEditForm(request.POST,instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(\"assign_teachers_view\")\n else:\n form = AssignTeacherEditForm(instance=post)\n return render(request, 'edit_assign_subjects_to_teachers.html', {'form':form,'teacher':teacher})\ndef delete_assign_teacher(request,pk):\n subject = AssignSubjectTeacher.objects.get(assign_subject_teacher_id=pk)\n subject.delete()\n return redirect(\"assign_teachers_view\")\ndef add_exam(request):\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n exam = Exam.objects.filter(school_id=school_id ,class_id__in=Class.objects.filter(academic_year=academic_year)).select_related('class_id').select_related('section_id').order_by('exam_id')\n if request.method == \"POST\":\n form = ExamForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n messages.success(request, 'Exam Added Successfully')\n form = ExamForm()\n else:\n form = ExamForm()\n return render(request, 'add_exam.html', {'form':form,'exam':exam})\ndef edit_exam(request,pk):\n post = get_object_or_404(Exam, pk=pk)\n if request.method == \"POST\":\n form = ExamEditForm(request.POST,instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect('add_exam')\n else:\n form = ExamEditForm(instance=post)\n return render(request, 'edit_exam.html', {'form': form})\ndef delete_exam(request, pk):\n exam = Exam.objects.get(exam_id=pk)\n exam.delete()\n return redirect(\"add_exam\")\ndef exams(request):\n return render(request, 'exams.html', {})\ndef change_password(request):\n return render(request, 'change_password.html', {})\ndef pagination_count_year(request):\n return render(request, 'other_settings.html', {})\ndef student_section(request):\n school_id=request.session['schoolname']\n student = StudentDetail.objects.filter(school_id=school_id)\n if request.method == \"POST\":\n form = StudentSectionForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return redirect(\"view_student_section\")\n else:\n form = StudentSectionForm()\n\n return render(request, 'student_section.html', {'form':form,'student':student})\ndef edit_student_section(request,pk):\n post = get_object_or_404(StudentSection, pk=pk)\n if request.method == \"POST\":\n form = StudentSectionEditForm(request.POST,instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n return 
redirect(\"view_student_section\")\n else:\n form = StudentSectionEditForm(instance=post)\n return render(request, 'edit_student_section.html', {'form': form , 'post' :post})\n\ndef delete_student_section(request, pk):\n student = StudentSection.objects.get(student_section_id=pk)\n student.delete()\n return redirect(\"view_student_section\")\n\ndef student_result(request):\n return render(request, 'student_result.html', {})\n\ndef student_mark(request):\n return render(request, 'student_mark.html', {})\n\ndef ajax_exam_class_search(request):\n academic_year = request.POST.get('academic_year')\n school_id = request.session['schoolname']\n cursor = connection.cursor()\n post = '''SELECT school_class.class_id,school_class.class_name from school_class where school_class.academic_year='%s' AND school_class.school_id_id='%d' ''' % (academic_year,school_id)\n query = cursor.execute(post)\n row = cursor.fetchall()\n data = {\n 'class_name':row\n }\n return JsonResponse(data)\n\ndef student_view_marks(request):\n student_id = request.session['student_id']\n student_name = StudentSection.objects.filter(student_id=student_id)\n return render(request, 'student_view_marks.html', {'student_name':student_name})\n\ndef student_view_diary_notes(request):\n student_id = request.session['student_id']\n cursor = connection.cursor()\n student = ''' SELECT * from school_studentsection where school_studentsection.student_id_id='%d' ''' % (student_id)\n query = cursor.execute(student)\n row = cursor.fetchone()\n cls_id = row[2]\n sec_id = row[3]\n task = StudentDiary.objects.filter(class_id=cls_id,section_id=sec_id)\n return render(request, 'student_view_diary_notes.html', {'student':task})\n\ndef manage_student_marks(request):\n school_id=request.session['schoolname']\n academic_year=request.session['academic_year']\n staff_id=request.session['user_id']\n staff = AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,class_id__in=Class.objects.filter(academic_year=academic_year,school_id_id=school_id)).select_related('class_id').select_related('section_id').select_related('subject_id')\n mark=Exam.objects.filter(school_id_id=school_id,class_id__in=Class.objects.filter(academic_year=academic_year)).select_related('class_id') \n class_id = staff.values('class_id')\n section_id = staff.values('section_id')\n\n student = StudentSection.objects.filter(class_id__in=AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,class_id__in=Class.objects.filter(\n academic_year=academic_year,school_id_id=school_id)).select_related('class_id').select_related('section_id').values('class_id'),section_id__in=AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,class_id__in=Class.objects.filter(\n academic_year=academic_year,school_id_id=school_id)).select_related('class_id').select_related('section_id').values('section_id'))\n\n return render(request, 'manage_student_marks.html', {'mark':mark,'staff':staff,'class_id':class_id,'section_id':section_id,'student':student})\ndef view_manage_student_marks(request):\n if request.session['user_id']:\n cls_id = request.GET.get('cls_id')\n sec_id = request.GET.get('sec_id')\n class_id = int(cls_id)\n section_id = int(sec_id)\n school_id = request.session['schoolname']\n staff_id = request.session['user_id']\n academic_year = request.session['academic_year']\n student = StudentSection.objects.filter(class_id_id=class_id,\n section_id_id=section_id,academic_year=academic_year,student_id__in=StudentDetail.objects.filter(school_id=school_id)) \n return render(request, 
'view_manage_student_marks.html', {'post':student})\n else:\n return redirect(\"teacher_login\")\n\ndef add_student_diary_notes(request):\n if request.GET.get('class_id') and request.GET.get('sec_id'):\n class_id = request.GET.get('class_id')\n cls_id = int(class_id)\n section_id = request.GET.get('sec_id')\n sec_id = int(section_id)\n staff_id=request.session['user_id']\n if request.method == \"POST\":\n homework_classwork = int(request.POST.get('homework_classwork'))\n subject = request.POST.get('subject_id_id')\n assigned_date = request.POST.get('assigned_date')\n assigned_date =datetime.strptime(assigned_date, \"%d-%m-%Y\").strftime('%Y-%m-%d')\n diary_notes = request.POST.get('diary_notes')\n StudentDiary.objects.create(subject_id_id=subject,assigned_date=assigned_date, diary_note=diary_notes,class_id_id=cls_id,section_id_id=sec_id,staff_id_id=staff_id, homework_classwork=homework_classwork)\n messages.success(request,'Diary Added Successfully')\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n student = StudentSection.objects.filter(class_id=cls_id,academic_year=academic_year,section_id=sec_id,student_id_id__in=StudentDetail.objects.filter(school_id=school_id) )\n subject = AssignSubjectTeacher.objects.filter(section_id=sec_id,class_id__in=Class.objects.filter(academic_year=academic_year,class_id=int(cls_id)), staff_id=staff_id)\n return render(request,'add_student_diary_notes.html',{'subject':subject,'student':student})\n else:\n return redirect(\"teacher_class_diary\")\n\ndef view_student_diary_notes(request):\n if request.method == \"POST\":\n diary_id = request.POST.get('diary_id')\n ids = int(diary_id)\n diary = StudentDiary.objects.get(diary_id=ids)\n diary.delete()\n if request.GET.get('class_id') and request.GET.get('sec_id'):\n class_id = request.GET.get('class_id')\n cls_id = int(class_id)\n section_id = request.GET.get('sec_id')\n sec_id = int(section_id)\n staff_id=request.session['user_id']\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n diary_data = StudentDiary.objects.filter(class_id_id__in=Class.objects.filter(class_id=cls_id,academic_year=academic_year),section_id_id=sec_id,staff_id_id=staff_id )\n return render(request,'view_student_diary_notes.html',{'diary_data':diary_data})\n else:\n return render(request, 'view_student_diary_notes.html', {})\ndef view_student_report(request):\n return render(request, 'view_student_report.html', {})\ndef teacher_change_password(request):\n staff_id=request.session['user_id']\n get_teacher=\"\"\n if request.method == \"POST\":\n get_psw = request.POST['password']\n password = StaffDetail.objects.filter(staff_id=staff_id).update(password=get_psw)\n messages.success(request, 'Password Updated Successfully')\n else:\n get_teacher = StaffDetail.objects.filter(staff_id=staff_id)\n return render(request,'teacher_change_password.html',{\"teacher\": get_teacher})\ndef teacher_other_settings(request):\n return render(request, 'teacher_other_settings.html', {})\n\ndef view_student_section(request):\n academic_year = request.session['academic_year'];\n school_id = request.session['schoolname'];\n row = StudentSection.objects.filter(academic_year=academic_year,student_id__in=StudentDetail.objects.filter(school_id_id=school_id)).select_related('student_id').select_related('class_id').select_related('section_id').order_by('-student_section_id')\n return render(request, 'view_student_section.html', {'row' : row})\n\n\ndef select_section(request):\n 
school_id = request.session['schoolname']\n class_id = request.POST.get('class_id')\n academic_year = request.session['academic_year']\n cursor = connection.cursor()\n post = '''SELECT * from school_class INNER JOIN school_section ON\n school_class.class_id=school_section.class_id_id where school_section.class_id_id='%d'\n AND school_class.school_id_id='%d' AND school_class.academic_year='%s' AND school_section.school_id_id='%d' ''' % (int(class_id),int(school_id),academic_year,int(school_id))\n query = cursor.execute(post)\n row = cursor.fetchall()\n data = {\n\n 'rowval':row\n }\n return JsonResponse(data)\n\ndef select_teacher_section(request):\n school_id = request.session['schoolname']\n class_id = request.POST.get('class_id')\n academic_year = request.session['academic_year']\n teacher_id = request.session['user_id']\n cursor = connection.cursor()\n post = '''SELECT COUNT(school_class.class_id),COUNT(school_section.section_id), from school_class INNER JOIN school_section ON\n school_class.class_id=school_section.class_id_id INNER JOIN school_assignsubjectteacher ON\n school_assignsubjectteacher.section_id_id=school_section.section_id where school_section.class_id_id='%d'\n AND school_class.school_id_id='%d' AND school_class.academic_year='%s' AND school_section.school_id_id='%d'\n AND school_assignsubjectteacher.staff_id_id='%d' GROUP BY school_class.class_id''' % (int(class_id),int(school_id),academic_year,int(school_id),int(teacher_id))\n query = cursor.execute(post)\n row = cursor.fetchall()\n data = {\n\n 'rowval':row\n }\n return JsonResponse(data)\n\ndef select_academic_year(request):\n school_id = request.session['schoolname']\n cursor = connection.cursor()\n post = '''SELECT * from school_schoolsetting where school_schoolsetting.school_id_id='%d' ''' %(int(school_id))\n query = cursor.execute(post)\n row = cursor.fetchone()\n year = row[1]\n request.session['academic_year'] = year\n academic_year = request.session['academic_year']\n data = {\n\n 'academic_year':academic_year\n\n }\n return JsonResponse(data)\n\ndef select_class(request):\n school_id = request.POST.get('school_id')\n academic_year = request.session['academic_year']\n cursor = connection.cursor()\n post = '''SELECT * from school_class where school_class.academic_year='%s' ''' % (academic_year)\n query = cursor.execute(post)\n row = cursor.fetchall()\n data = {\n\n 'row':row\n }\n return JsonResponse(data)\n\ndef student_section_class_select(request):\n school_id = request.session['schoolname']\n class_id = request.POST.get('class_id')\n\n cursor = connection.cursor()\n post = '''SELECT * from school_class INNER JOIN school_section ON\n school_class.class_id=school_section.class_id_id where school_section.class_id_id='%d'\n AND school_class.school_id_id='%d' ''' % (int(class_id),int(school_id))\n query = cursor.execute(post)\n row = cursor.fetchall()\n\n data = {\n 'row':row\n }\n\n return JsonResponse(data)\n\ndef section_selection_student(request):\n\n sec_id = request.GET.get('id')\n cursor = connection.cursor()\n post = '''SELECT * from school_StudentSection where school_StudentSection.student_section_id='%d'\n ''' % (int(sec_id))\n query = cursor.execute(post)\n row = cursor.fetchall()\n\n data = {\n 'row':row\n }\n return JsonResponse(data)\n\ndef select_subject(request):\n ids = request.POST.get('class_id')\n class_id = int(ids)\n sec_id = request.POST.get('section_id')\n section_id = int(sec_id)\n academic_year = request.session['academic_year']\n school_id = request.session['schoolname']\n cursor = 
connection.cursor()\n post = '''SELECT * from school_class INNER JOIN school_section ON school_class.class_id=school_section.class_id_id\n INNER JOIN school_subject ON school_subject.section_id_id=school_section.section_id where school_subject.class_id_id ='%d'\n AND school_subject.section_id_id='%d' AND school_class.school_id_id='%d' AND school_class.academic_year='%s' AND school_section.school_id_id='%d' ''' % (class_id,section_id,school_id,academic_year,school_id)\n query = cursor.execute(post)\n row = cursor.fetchall()\n\n data = {\n 'subject':row\n }\n return JsonResponse(data)\n\ndef assign_teachers_view(request):\n academic_year = request.session['academic_year']\n school_id = request.session['schoolname']\n teachers = AssignSubjectTeacher.objects.filter(academic_year=academic_year,staff_id__in=StaffDetail.objects.filter(school_id_id=school_id)).select_related('class_id').select_related('section_id').select_related('subject_id').select_related('staff_id').order_by('-assign_subject_teacher_id')\n return render(request,'assign_teachers_view.html',{'teachers':teachers})\n\ndef admin_change_password(request):\n admin_id = request.session['schoolname']\n get_admin=\"\"\n if request.method == \"POST\":\n get_psw = request.POST['password']\n password = SchoolDetail.objects.filter(id=admin_id).update(password=get_psw)\n messages.success(request, 'Password Updated Successfully')\n else:\n get_admin = SchoolDetail.objects.filter(id=admin_id)\n return render(request,'change_password.html',{\"school\": get_admin})\n\ndef manage_students_marks(request):\n ex=\"\"\n school_id=request.session['schoolname']\n academic_year=request.session['academic_year']\n staff_id=request.session['user_id']\n query = AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,academic_year=academic_year)\n staff = query.values('class_id', 'section_id','class_id__class_name','section_id__section_name').annotate(count = Count('class_id')).order_by('class_id__class_name')\n class_id = query.values('class_id')\n section_id = query.values('section_id')\n count = len(class_id)\n for i in range(0,count):\n \texams = Exam.objects.filter(class_id__in=class_id,section_id__in=section_id)\n \tex = exams.values('class_id', 'section_id','class_id__class_name','section_id__section_name','exam_id','exams').annotate(count = Count('class_id'))\n \n return render(request,'manage_students_marks.html',{'staff':staff,'class_id':class_id,'exams':ex,'section_id':section_id})\n\ndef ajax_student_marks(request):\n cls_id = request.POST.get('class_id')\n sec_id = request.POST.get('section_id')\n sub_id = request.POST.get('subject_id')\n request.session['class_id'] = cls_id\n data = {\n 'cls_id':cls_id,\n 'sec_id':sec_id,\n 'sub_id':sub_id\n }\n return JsonResponse(data)\ndef add_student_marks(request,cls_id,sec_id,exam_id):\n rc = dict()\n teacher_id = request.session['user_id']\n cursor = connection.cursor()\n get_mark = ''' SELECT school_Mark.*,school_studentdetail.register_number,school_studentdetail.student_name,school_Subject.subject_name,school_Class.class_name,school_Section.section_name from school_Mark\n LEFT JOIN school_studentdetail ON school_studentdetail.register_number=school_Mark.student_reg_no\n LEFT JOIN school_Subject ON school_Subject.subject_id=school_Mark.subject_id\n LEFT JOIN school_Class ON school_Class.class_id=school_Mark.class_id\n LEFT JOIN school_Section ON school_Section.section_id=school_Mark.section_id\n WHERE school_Mark.class_id=%d AND school_Mark.section_id=%d AND school_Mark.staff_id=%d AND 
school_Mark.exam_id=%d '''%(int(cls_id),int(sec_id),int(teacher_id),int(exam_id))\n sub = cursor.execute(get_mark)\n mark = cursor.fetchall()\n school_id = request.session['schoolname']\n academic_year = request.session['academic_year']\n\n if request.method == \"POST\":\n if request.POST.getlist('mark_id[]'):\n ids = request.POST.getlist('mark_id[]')\n mark = request.POST.getlist('student_mark[]')\n length=len(mark)\n for row in range(0,length):\n if mark[row]==\"\" :\n mark[row]=0;\n Mark.objects.filter(mark_id=ids[row]).update(mark=mark[row])\n return redirect(\"manage_students_marks\")\n else :\n subject_id = request.POST.getlist('subject_id[]')\n mark = request.POST.getlist('student_mark[]')\n student_id = request.POST.getlist('stud_reg_no[]')\n staff_id = request.POST.getlist('staff_id[]')\n length=len(mark)\n for row in range(0,length):\n if mark[row]==\"\":\n mark[row]=0;\n Mark.objects.create(student_reg_no=student_id[row],class_id=cls_id,section_id=sec_id,subject_id=subject_id[row],staff_id=staff_id[row],mark=mark[row],exam_id=exam_id)\n return redirect(\"manage_students_marks\")\n #exam = Exam.objects.filter(section_id=int(sec_id),school_id=school_id,class_id__in=Class.objects.filter(academic_year=academic_year,class_id=int(cls_id)))\n exam = Exam.objects.filter(exam_id=exam_id)\n student = StudentSection.objects.filter(class_id=cls_id,academic_year=academic_year,section_id=sec_id,student_id_id__in=StudentDetail.objects.filter(school_id=school_id))\n subject = AssignSubjectTeacher.objects.filter(section_id=sec_id,class_id__in=Class.objects.filter(academic_year=academic_year,class_id=int(cls_id)),staff_id=teacher_id)\n return render(request,'add_student_marks.html',{'exam':exam,'student':student,'subject':subject,'student_reg':rc, 'mark' :mark })\n\ndef chart_example(request):\n return render(request,'chart.html',{})\ndef overall_subject_chart(request):\n cls_id = request.GET.get('cls_id')\n sec_id = request.GET.get('sec_id')\n class_id = int(cls_id)\n section_id = int(sec_id)\n school_id = request.session['schoolname']\n staff_id = request.session['user_id']\n academic_year = request.session['academic_year']\n query = StudentSection.objects.filter(class_id_id=class_id,section_id_id=section_id,\n academic_year=academic_year)\n student_count = query.values('student_id_id').aggregate(count = Count('student_id_id'))\n cursor = connection.cursor()\n subject = '''SELECT COUNT(school_mark.subject_id),SUM(school_mark.mark),COUNT(school_subject.subject_name) from school_class INNER JOIN school_mark ON school_class.class_id=school_mark.class_id\n INNER JOIN school_subject ON school_subject.subject_id=school_mark.subject_id\n where school_class.academic_year='%s' AND school_mark.class_id='%d' AND school_mark.section_id='%d' \n AND school_mark.staff_id='%d' AND school_class.school_id_id='%d' GROUP BY school_mark.subject_id''' % (academic_year,class_id,section_id,int(staff_id),int(school_id))\n sub = cursor.execute(subject)\n row = cursor.fetchall()\n\n return render(request,'overall_subject_chart.html',{'student_count':student_count,'row':row,'query':query})\n\ndef ajax_student_grap(request):\n cls_id = request.POST.get('cls_id')\n sec_id = request.POST.get('sec_id')\n class_id = int(cls_id)\n section_id = int(sec_id)\n school_id = request.session['schoolname']\n staff_id = request.session['user_id']\n academic_year = request.session['academic_year']\n query = StudentSection.objects.filter(class_id_id=class_id,section_id_id=section_id,\n academic_year=academic_year)\n student_count = 
query.values('student_id_id').aggregate(count = Count('student_id_id'))\n cursor = connection.cursor()\n subject = '''SELECT COUNT(school_mark.subject_id),SUM(school_mark.mark),school_subject.subject_name,COUNT(school_mark.student_reg_no),school_subject.subject_id from school_class INNER JOIN school_mark ON school_class.class_id=school_mark.class_id\n INNER JOIN school_subject ON school_subject.subject_id=school_mark.subject_id\n where school_class.academic_year='%s' AND school_mark.class_id='%d' AND school_mark.section_id='%d' \n AND school_mark.staff_id='%d' AND school_class.school_id_id='%d' GROUP BY school_subject.subject_id''' % (academic_year,class_id,section_id,int(staff_id),int(school_id))\n sub = cursor.execute(subject)\n row = cursor.fetchall()\n\n data = {\n 'row':row,\n 'student_count':student_count\n }\n return JsonResponse(data)\n\ndef single_student_subject_marks_chart(request,pk):\n\tschool_id=request.session['schoolname']\n\tstudent_name = StudentDetail.objects.filter(register_number=pk)\n\tcursor = connection.cursor()\n\tpost = ''' SELECT SUM(school_mark.mark),school_mark.subject_id\n\tfrom school_mark INNER JOIN school_subject ON \n\tschool_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' GROUP BY school_mark.subject_id ''' % (pk)\n\tsub = cursor.execute(post)\n\trow = cursor.fetchall()\n\tlenth = len(row)\n\n\tsubject_name = '''SELECT COUNT(school_subject.subject_id),school_subject.subject_name,school_subject.subject_id from school_mark INNER JOIN school_subject ON \n\tschool_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' \n\tGROUP BY school_subject.subject_name,school_subject.subject_id ''' % (pk)\n\tquery = cursor.execute(subject_name)\n\tsub_name =cursor.fetchall()\n\n\treturn render(request,'single_student_subject_marks_chart.html',{'mark':row,'student_name':student_name,'lenth':lenth,'subject_name':sub_name})\ndef student_subject_marks_chart(request,pk):\n student_name = StudentDetail.objects.filter(register_number=pk)\n cursor = connection.cursor()\n mark = ''' SELECT SUM(school_mark.mark),school_mark.subject_id\n from school_mark INNER JOIN school_subject ON \n school_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' GROUP BY school_mark.subject_id ''' % (pk)\n sub = cursor.execute(mark)\n row = cursor.fetchall()\n lenth = len(row)\n subject_name = '''SELECT COUNT(school_subject.subject_id),school_subject.subject_name,school_subject.subject_id from school_mark INNER JOIN school_subject ON \n school_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' \n GROUP BY school_subject.subject_name,school_subject.subject_id ''' % (pk)\n query = cursor.execute(subject_name)\n sub_name =cursor.fetchall()\n return render(request,'student_subject_marks_chart.html',{'mark':row,'student_name':student_name,'lenth':lenth,'subject_name':sub_name})\ndef edit_students_mark(request,pk):\n staff_id=request.session['user_id']\n academic_year = request.session['academic_year']\n cursor = connection.cursor()\n mark = '''SELECT * from school_mark INNER JOIN school_class ON school_mark.class_id=school_class.class_id\n where school_mark.student_reg_no='%s' AND school_mark.staff_id='%d' \n AND school_class.academic_year='%s' ''' % (pk,int(staff_id),academic_year)\n query = cursor.execute(mark)\n row = cursor.fetchall()\n return render(request,'edit_students_mark.html',{'row':row})\n\ndef ajax_subject_count(request):\n student_reg_no = request.GET.get('ids')\n cursor = 
connection.cursor()\n post = ''' SELECT SUM(school_subject.subject_id),school_subject.subject_name,school_subject.subject_id\n from school_mark INNER JOIN school_subject ON \n school_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' GROUP BY school_subject.subject_id''' % (student_reg_no)\n sub = cursor.execute(post)\n row = cursor.fetchall()\n\n post2 = ''' SELECT COUNT(school_mark.subject_id) from school_mark INNER JOIN school_subject ON \n school_mark.subject_id=school_subject.subject_id where school_mark.student_reg_no='%s' GROUP BY school_mark.subject_id''' % (student_reg_no)\n sub1 = cursor.execute(post2)\n row1 = cursor.fetchall()\n\n lenth = len(row)\n data = {\n 'lenth':lenth,\n 'row':row,\n 'row1':row1\n }\n return JsonResponse(data)\ndef teacher_class_diary(request):\n school_id=request.session['schoolname']\n academic_year=request.session['academic_year']\n staff_id=request.session['user_id']\n query = AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,academic_year=academic_year).select_related('class').select_related('section')\n query = query.values('class_id', 'section_id','class_id__class_name','section_id__section_name').annotate(count = Count('section_id'), count2= Count('class_id')).order_by('class_id__class_name')\n return render(request,'teacher_class_diary.html',{'staff':query})\n\ndef manage_exam_mark_chart(request):\n school_id=request.session['schoolname']\n academic_year=request.session['academic_year']\n staff_id=request.session['user_id']\n query = AssignSubjectTeacher.objects.filter(staff_id_id=staff_id,academic_year=academic_year)\n query = query.values('class_id', 'section_id','class_id__class_name','section_id__section_name').annotate(count = Count('class_id')).order_by('class_id__class_name')\n return render(request,'manage_exam_mark_chart.html',{'staff':query})\n\ndef particular_exam_mark_chart(request):\n if request.GET.get('cls_id') and request.GET.get('sec_id'):\n cls_id = request.GET.get('cls_id')\n class_id = int(cls_id)\n sec_id = request.GET.get('sec_id')\n section_id = int(sec_id)\n staff_id = request.session['user_id']\n cursor = connection.cursor()\n post = ''' SELECT school_subject.subject_name,COUNT(school_subject.subject_id),school_subject.subject_id from school_subject INNER JOIN \n school_mark ON school_subject.subject_id = school_mark.subject_id\n where school_mark.class_id='%d' AND school_mark.section_id='%d'\n AND school_mark.staff_id='%d' GROUP BY school_subject.subject_id''' % (class_id,section_id,staff_id)\n query = cursor.execute(post)\n row = cursor.fetchall()\n\n query = Mark.objects.filter(staff_id=staff_id,class_id=class_id,section_id=section_id).select_related('subject').select_related('exam')\n query = query.values('exam_id', 'subject_id').annotate(\n count= Count('exam_id'),count1= Count('subject_id'), count3 = Sum('mark')).order_by('exam_id','subject_id')\n\n student_count = ''' SELECT COUNT(school_mark.student_reg_no) from school_mark \n where school_mark.class_id='%d' AND school_mark.section_id='%d'\n AND school_mark.staff_id='%d' GROUP BY school_mark.student_reg_no ''' % (class_id,section_id,staff_id)\n cnt = cursor.execute(student_count)\n row1 = cursor.fetchall()\n length = len(row1)\n\n return render(request,'particular_exam_mark_chart.html',{'row':row,'row3':query,'cnt':length})\n else:\n return redirect('manage_exam_mark_chart')\ndef get_exam_name(request):\n cls_id = request.POST.get('cls_id')\n sec_id = request.POST.get('sec_id')\n class_id = int(cls_id)\n section_id = 
int(sec_id)\n cursor = connection.cursor()\n post = ''' SELECT COUNT(school_exam.exam_id),school_exam.exams from school_mark INNER JOIN \n school_exam ON school_mark.exam_id = school_exam.exam_id\n where school_mark.class_id='%d' AND school_mark.section_id='%d'\n GROUP BY school_exam.exam_id''' % (class_id,section_id)\n query = cursor.execute(post)\n row = cursor.fetchall()\n\n data = {\n 'row':row\n }\n return JsonResponse(data)\n\ndef select_exam(request):\n cls_id = request.POST.get('class_id')\n class_id = int(cls_id)\n sec_id = request.POST.get('section_id')\n section_id = int(sec_id)\n staff_id = request.session['user_id']\n cursor = connection.cursor()\n sql= ''' SELECT * from school_exam where school_exam.class_id_id='%d' AND school_exam.section_id_id='%d'\n ''' % (class_id,section_id)\n res = cursor.execute(sql)\n exam_name = cursor.fetchall()\n post = ''' SELECT COUNT(school_mark.exam_id) ,COUNT(school_mark.subject_id),SUM(school_mark.mark),COUNT(school_mark.student_reg_no) from school_mark where school_mark.class_id='%d' AND school_mark.section_id='%d'\n AND school_mark.staff_id='%d' GROUP BY school_mark.subject_id''' % (class_id,section_id,staff_id)\n query = cursor.execute(post)\n row = cursor.fetchall()\n data = {\n 'row':row,\n 'exam_name':exam_name\n }\n return JsonResponse(data)\n\ndef select_school_exam(request):\n cls_id = request.POST.get('class_id')\n class_id = int(cls_id)\n sec_id = request.POST.get('section_id')\n section_id = int(sec_id)\n cursor = connection.cursor()\n sql= ''' SELECT school_exam.class_id_id,school_exam.section_id_id,school_exam.exam_id,school_exam.exams from school_exam where school_exam.class_id_id='%d' AND school_exam.section_id_id='%d'\n ''' % (class_id,section_id)\n res = cursor.execute(sql)\n exam_name = cursor.fetchall()\n data = {\n 'exam_name':exam_name\n }\n return JsonResponse(data)\n\ndef select_school_details(request):\n school_id = request.session['schoolname']\n cursor = connection.cursor()\n sql= ''' SELECT * from school_schooldetail where school_schooldetail.id='%d' ''' % (school_id)\n res = cursor.execute(sql)\n school_details = cursor.fetchall()\n data ={\n 'school_details':school_details\n }\n return JsonResponse(data)\n\ndef search_student_marks_ajax(request):\n cls_id = request.POST.get('class_id')\n class_id = int(cls_id)\n sec_id = request.POST.get('section_id')\n section_id = int(sec_id)\n ex_id = request.POST.get('exam_id')\n exam_id = int(ex_id)\n student_reg_no = request.POST.get('student_reg_no')\n cursor = connection.cursor()\n stud = '''SELECT * from school_studentdetail'''\n result = cursor.execute(stud)\n student_id = cursor.fetchall()\n\n if student_reg_no == \"\":\n sql= ''' SELECT COUNT(school_mark.student_reg_no),SUM(school_mark.mark),school_mark.student_reg_no,COUNT(school_mark.subject_id) from school_mark where school_mark.class_id='%d' AND school_mark.section_id='%d' \n AND school_mark.exam_id='%d' OR school_mark.student_reg_no='%s' GROUP BY school_mark.student_reg_no''' % (class_id,section_id,exam_id,student_reg_no)\n res = cursor.execute(sql)\n mark_details = cursor.fetchall()\n data ={\n 'mark_details':mark_details,\n 'student_id': student_id\n }\n else:\n sql= ''' SELECT COUNT(school_mark.student_reg_no),SUM(school_mark.mark),school_mark.student_reg_no,COUNT(school_mark.subject_id) from school_mark where school_mark.class_id='%d' AND school_mark.section_id='%d' \n AND school_mark.exam_id='%d' AND school_mark.student_reg_no='%s' GROUP BY school_mark.student_reg_no''' % 
(class_id,section_id,exam_id,student_reg_no)\n res = cursor.execute(sql)\n mark_details = cursor.fetchall()\n data ={\n 'mark_details':mark_details,\n 'student_id': student_id\n }\n return JsonResponse(data)\n\ndef landing_page(request):\n return render(request,'home.html',{})\nfrom django.db.models.query import QuerySet\ndef student_class_mark(request,class_id,section_id,exam_id):\n if request.method == \"POST\":\n ex_id = request.POST.get('ex_id')\n stud_id = request.POST.get('stud_id')\n exam_id = int(ex_id)\n student_id = int(stud_id)\n mark_delete = Mark.objects.filter(exam_id=exam_id,student_reg_no=student_id)\n mark_delete.delete()\n school_id = request.session['schoolname']\n staff_id = request.session['user_id']\n academic_year = request.session['academic_year']\n cursor = connection.cursor()\n\n stud_ids = ''' SELECT school_mark.student_reg_no, COUNT(school_mark.student_reg_no) from school_mark INNER JOIN\n school_studentdetail ON school_mark.student_reg_no=school_studentdetail.register_number\n WHERE school_mark.class_id='%d' AND school_mark.section_id='%d'\n AND school_mark.exam_id='%d' GROUP BY school_mark.student_reg_no ''' % (class_id,section_id,exam_id)\n exams_id = cursor.execute(stud_ids)\n exams_name = cursor.fetchall()\n\n a = Exam.objects.filter(exam_id=exam_id)\n subject_name = AssignSubjectTeacher.objects.filter(class_id=class_id,section_id=section_id,staff_id=staff_id).order_by('subject_id')\n results = Mark.objects.filter(exam_id=exam_id,class_id=class_id,section_id=section_id).select_related('exam').select_related('studentsection')\n cnt = results.values('student_reg_no','exam_id','mark').annotate(count=Count('student_reg_no'),count1=Count('exam_id'))\n\n return render(request,'student_class_mark.html',{'exams_name':exams_name,'a':a,'results':cnt,'subject_name':subject_name})\ndef mark_already_exist(request):\n\tcls_id = request.POST.getlist('class_id[]')\n\tsec_id = request.POST.getlist('section_id[]')\n\tstaff_id = request.POST.getlist('staff_id[]')\n\tstudent_id = request.POST.getlist('stud_reg_no[]')\n\tsubject_id = request.POST.getlist('subject_id[]')\n\tex_id = request.POST.get('exams')\n\texam_id = int(ex_id)\n\tlength = len(subject_id)\n\tcursor = connection.cursor()\n\tfor i in range(0,length):\n\t\tif subject_id[i]!= \"\":\n\t\t\tpost = '''SELECT * from school_mark where school_mark.class_id='%d' AND school_mark.section_id='%d' AND \n\t\t\tschool_mark.subject_id='%d' AND school_mark.exam_id='%d' AND school_mark.staff_id='%d' AND \n\t\t\tschool_mark.student_reg_no='%s' ''' % (int(cls_id[i]),int(sec_id[i]),int(subject_id[i]),exam_id,int(staff_id[i]),student_id[i])\n\t\t\tquery = cursor.execute(post)\n\t\t\trow = cursor.fetchall()\n\t\t\tdata = {\n\t\t\t'msg':\"Mark Already Exist\",\n\t\t\t'row':row,\n\t\t\t'length':student_id\n\t\t\t}\n\t\t\treturn JsonResponse(data)\ndef choose_exam(request):\n\tif request.GET.get('cls_id') and request.GET.get('sec_id'):\n\t\tcls_id = request.GET.get('cls_id')\n\t\tsec_id = request.GET.get('sec_id')\n\t\tclass_id = int(cls_id)\n\t\tsection_id = int(sec_id)\n\t\tschool_id = request.session['schoolname']\n\t\texams = Exam.objects.filter(class_id_id=class_id,section_id_id=section_id,school_id_id=school_id)\n\t\treturn render(request,'choose_exam.html',{'exams':exams})\n\telse:\n\t\treturn redirect(\"manage_students_marks\")\ndef exam_mark_chart(request,cls_id,sec_id,pk):\n stud_id = pk\n school_id = request.session['schoolname']\n exams = Exam.objects.filter(class_id_id=cls_id,section_id_id=sec_id,school_id_id=school_id)\n 
return render(request,'exam_mark_chart.html',{'exams':exams,'pk':pk})\ndef exam_chart(request,cls_id,sec_id,exam_id,stud_id):\n student_count = Mark.objects.filter(exam_id=exam_id,class_id=cls_id,section_id=sec_id)\n stud_cnt = student_count.values('student_reg_no').annotate(count=Count('student_reg_no'))\n length = len(stud_cnt)\n sub_name = Subject.objects.filter(class_id=cls_id,section_id=sec_id)\n sub_filter = sub_name.values('subject_id','subject_name').annotate(count=Count('subject_id'))\n results = Mark.objects.filter(exam_id=exam_id,class_id=cls_id,section_id=sec_id)\n cnt = results.values('subject_id','exam_id').annotate(count=Count('subject_id'),total=Sum('mark')).order_by('subject_id')\n cls_name = Class.objects.filter(class_id=cls_id)\n\n return render(request,'exam_chart.html',{'cnt':cnt,'stud_cnt':length,'sub_filter':sub_name,'cls_name':cls_name})\ndef sub_ajax(request):\n class_id = request.POST.get('class_id')\n section_id = request.POST.get('section_id')\n cursor = connection.cursor()\n sub_id = ''' SELECT * from school_subject\n WHERE school_subject.class_id_id='%d' AND school_subject.section_id_id='%d'\n order by school_subject.subject_id''' % (int(class_id),int(section_id))\n post = cursor.execute(sub_id)\n sub_name = cursor.fetchall()\n\n data = {\n 'row':sub_name\n }\n return JsonResponse(data)\ndef select_exams(request):\n\tif request.GET.get('cls_id') and request.GET.get('sec_id'):\n\t\tcls_id = request.GET.get('cls_id')\n\t\tsec_id = request.GET.get('sec_id')\n\t\tclass_id = int(cls_id)\n\t\tsection_id = int(sec_id)\n\t\tschool_id = request.session['schoolname']\n\t\texams = Exam.objects.filter(class_id_id=class_id,section_id_id=section_id,school_id_id=school_id)\n\t\treturn render(request,'select_exams.html',{'exams':exams})\n\telse:\n\t\treturn redirect(\"manage_students_marks\")\ndef mark_ajax(request):\n class_id = request.POST.get('class_id')\n section_id = request.POST.get('section_id')\n exam_id = request.POST.get('exam_id')\n cursor = connection.cursor()\n stud_ids = ''' SELECT * from school_mark \n WHERE school_mark.class_id='%d' AND school_mark.section_id='%d'\n AND school_mark.exam_id='%d' order by school_mark.subject_id''' % (int(class_id),int(section_id),int(exam_id))\n exams_id = cursor.execute(stud_ids)\n exams_name = cursor.fetchall()\n\n data = {\n 'row':exams_name\n }\n return JsonResponse(data)\ndef delete_mark(request):\n stud_id = request.POST.get('stud_id')\n exam_id = request.POST.get('ex_id')\n cursor = connection.cursor()\n stud_ids = ''' DELETE from school_mark \n WHERE school_mark.exam_id='%d' AND school_mark.student_reg_no='%s' ''' % (int(exam_id),stud_id)\n exams_id = cursor.execute(stud_ids)\n\n data = {\n 'row':'deleted'\n }\n return JsonResponse(data)\ndef view_student_mark(request):\n stud_id = request.session['student_id']\n student = StudentSection.objects.filter(student_id=stud_id)\n return render(request,'view_student_mark.html',{'stud_id':student})\ndef exam_list(request):\n cls_id = request.POST.get('cls_id')\n sec_id = request.POST.get('sec_id')\n cursor = connection.cursor()\n stud_ids = ''' SELECT * from school_exam \n WHERE school_exam.class_id_id='%d' AND school_exam.section_id_id='%d'\n order by school_exam.exam_id''' % (int(cls_id),int(sec_id))\n exams_id = cursor.execute(stud_ids)\n exams_name = cursor.fetchall()\n data = {\n 'row':exams_name\n }\n return JsonResponse(data)\ndef view_single_student_particular_exam_mark(request):\n tot = 0\n Register_number = request.session['register_number']\n exam_id =\"\"\n if 
request.GET.get('id'):\n exam_id = request.GET.get('id')\n cursor = connection.cursor()\n Mark = ''' SELECT school_StudentDetail.student_name, school_Mark.*,school_Mark.subject_id, school_subject.subject_name , school_Exam.exams from school_Mark\n LEFT JOIN school_subject ON school_Mark.subject_id=school_subject.subject_id\n LEFT JOIN school_Exam ON school_Mark.exam_id=school_Exam.exam_id\n LEFT JOIN school_StudentDetail ON school_StudentDetail.register_number=school_Mark.student_reg_no\n WHERE school_Mark.exam_id='%d' AND school_Mark.student_reg_no='%s'\n ''' % (int(exam_id),Register_number)\n Mark_id = cursor.execute(Mark)\n Marks = cursor.fetchall()\n length = len(Marks)\n for i in Marks:\n tot+=i[3]\n average = tot/length\n if(average>=91 and average<=100):\n grade = \"A\"\n elif(average>=81 and average<=90):\n grade = \"B\"\n elif(average>=71 and average<=80):\n grade = \"C\"\n elif(average>=61 and average<=70):\n grade = \"D\"\n elif(average>=51 and average<=60):\n grade = \"E\"\n elif(average>=41 and average<=50):\n grade = \"O\"\n elif(average>=0 and average<=40):\n grade = \"F\"\n if grade == \"F\":\n result = \"Fail\"\n else:\n result = \"Pass\"\n percent = format(average, '.1f')\n return render(request,'view_single_student_particular_exam_mark.html',{'Register_number':Register_number,'exam':exam_id,'Marks':Marks,'tot':tot,'length':length,'percentage':percent,'grade':grade,'result':result})", "sub_path": "Trackmyschool/school/views - Copy.py", "file_name": "views - Copy.py", "file_ext": "py", "file_size_in_byte": 50224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "19", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 47, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects.all", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.contrib.sessions.models.Session", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects.all", "line_number": 88, "usage_type": "call"}, {"api_name": 
"django.contrib.sessions.models.Session.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "django.contrib.sessions.models.Session", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 91, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.contrib.sessions.models.Session", "line_number": 95, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 142, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 146, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 152, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 158, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 161, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 165, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 174, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 184, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 184, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 189, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 191, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 200, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 204, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 215, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 218, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 220, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 227, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 230, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 234, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 244, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 244, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 248, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", 
"line_number": 250, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 256, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 259, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 263, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 265, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 267, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 269, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 278, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 282, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 284, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 290, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 293, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 298, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 301, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 304, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 309, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 309, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 316, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 321, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 325, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 325, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 332, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 347, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 359, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 361, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 374, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 374, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 377, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 377, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 382, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 384, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 401, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 403, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 405, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 412, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 412, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 415, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 417, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 423, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 430, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 430, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 440, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 447, 
"usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 447, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 459, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 463, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 475, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 480, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 480, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 488, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 494, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 494, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 505, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 510, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 510, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 519, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 528, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 528, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 538, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 544, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 552, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 552, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 555, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 563, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 569, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 571, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 583, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 587, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 587, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 608, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 619, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 624, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 627, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 638, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 639, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 639, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 647, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 659, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 660, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 660, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 672, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 677, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 677, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 691, "usage_type": "call"}, {"api_name": 
"django.db.connection.cursor", "line_number": 694, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 694, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 706, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 710, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 710, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 716, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 720, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 720, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 738, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 744, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 745, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 752, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 753, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 762, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 762, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 772, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 772, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 781, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 783, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 789, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 789, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 800, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 808, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 808, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 821, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 828, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 828, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 836, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 840, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 840, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 847, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 857, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 857, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 880, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 883, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 896, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 896, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 908, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 910, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 920, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 920, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 933, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 942, 
"usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 944, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 949, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 952, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 955, "usage_type": "call"}, {"api_name": "django.db.models.Count", "line_number": 957, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 957, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 960, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 964, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 964, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 974, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 983, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 985, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 990, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 990, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1000, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 1004, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 1004, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1012, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1016, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 1020, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 1020, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1029, "usage_type": "call"}, {"api_name": "django.db.connection.cursor", "line_number": 1036, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 1036, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1068, "usage_type": "call"}]}