diff --git "a/1055.jsonl" "b/1055.jsonl" new file mode 100644--- /dev/null +++ "b/1055.jsonl" @@ -0,0 +1,448 @@ +{"seq_id": "183451473", "text": "import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\ndef get_max(data1, data2):\n array = np.zeros(2)\n array[0] = np.amax(data1)\n array[1] = np.amax(data2)\n\n return np.amax(array)\n\ndef get_min(data1, data2):\n array = np.zeros(2)\n array[0] = np.amin(data1)\n array[1] = np.amin(data2)\n\ndef plot_1figure(data, title=\"fig\", figure_name=\"fig\", save=True):\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data)\n\n max_val = np.amax(data) #縦軸の最大値決定のため\n min_val = np.amin(data)\n if min_val < 0:\n ax.set_ylim(min_val + min_val / 10, max_val + max_val / 10) #軸設定\n else:\n ax.set_ylim(0 - min_val / 10, max_val + max_val / 10) #軸設定\n ax.set_title(title) #グラフ名決め\n\n fig.tight_layout() # タイトルとラベルが被るのを解消\n\n if save==True:\n figure_name = figure_name + \".png\"\n plt.savefig(figure_name) # save as png\n\n plt.show() # グラフを画面表示\n\n# def save_animation(array, title=\"animation\", animation_name=\"animation\"):\n# # アニメーション保存\n# max_val = np.amax(array)\n# fig = plt.figure() # figure objectを取得\n# ax1 = fig.add_subplot(1, 1, 1)\n# ims = []\n# for i in range(array.shape[0]):\n# artist1 = ax1.plot(array[i, :], \"b\")\n# ax1.set_title(title) # グラフ名決め\n# ax1.set_ylim(0, max_val + max_val / 10) # 軸設定\n# ims.append(artist1)\n#\n# ani = animation.ArtistAnimation(fig, ims, interval=10)\n# animation_name += \".gif\"\n# ani.save(animation_name, writer=\"pillow\")\n\n# def plot_2figure(data1, data2, plot_name1=\"fig1\", plot_name2=\"fig2\", figure_name=\"fig\"):\n# # plot 2fig\n# fig = plt.figure()\n# ax1 = fig.add_subplot(2, 1, 1)\n# ax2 = fig.add_subplot(2, 1, 2)\n#\n# ax1.plot(data1)\n# ax2.plot(data2)\n#\n# max_val = get_max(data1, data2) #縦軸の最大値決定のため\n# ax1.set_ylim(0, max_val + max_val / 10) #軸設定\n# ax2.set_ylim(0, max_val + max_val / 10)\n#\n# ax1.set_title(plot_name1) #グラフ名決め\n# ax2.set_title(plot_name2)\n#\n# fig.tight_layout() # タイトルとラベルが被るのを解消\n#\n# figure_name = figure_name + \".png\"\n# plt.savefig(figure_name) # save as png\n#\n#\n# def save_2plot(array1, array2, title1, title2, file_name):\n# # アニメーション保存\n# max_val = get_max(array1, array2)\n# fig = plt.figure() # figure objectを取得\n# ax1 = fig.add_subplot(2, 1, 1)\n# ax2 = fig.add_subplot(2, 1, 2)\n# ims = []\n# for i in range(array1.shape[0]):\n# im1, = ax1.plot(array1[i, :], \"b\")\n# im2, = ax2.plot(array2[i, :], \"b\")\n# ax1.set_title(title1) # グラフ名決め\n# ax2.set_title(title2)\n# ax1.set_ylim(0, max_val + max_val / 10) # 軸設定\n# ax2.set_ylim(0, max_val + max_val / 10)\n# fig.tight_layout()\n# ims.append([im1, im2])\n# ani = animation.ArtistAnimation(fig, ims, interval=10)\n# file_name += \".gif\"\n# ani.save(file_name, writer=\"pillow\")", "sub_path": "utils/PlotFigure.py", "file_name": "PlotFigure.py", "file_ext": "py", "file_size_in_byte": 3218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "numpy.zeros", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 15, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "21465644", "text": "from imp import reload\n\nfrom PIL import Image, ImageFont, ImageDraw # 从PIL库导入所需模块\nimport sys\n\nreload(sys) # 必须要reload\n#sys.setdefaultencoding('utf-8') # 字符编码改为utf8\n\nheadPath = r\"/Users/zlinzhang/Downloads/\"\n# 头像图片路径\noutputPath = r\"/Users/zlinzhang/Downloads/\"\n# 处理后输出路径\nfontPath = r\"/System/Library/Fonts\"\n# 字体路径\nheadFile = \"head.jpg\" # 头像文件\noutFile = \"output.jpg\" # 输出文件\n# 打开图片,建立画布\nimage = Image.open(headPath + headFile, 'r')\ndraw = ImageDraw.Draw(image)\n\n# 由图片大小确定字体大小\nfontsize = min(image.size) / 4\n\n# 增加文字\nfontobj = ImageFont.truetype(font=fontPath + \"AdobeHeitiStd-Regular.otf\", size=fontsize, index=0, encoding='',\n filename=None) # 实例字体对象\ndraw.text((image.size[0] - fontsize, 0), text=\"5\", fill=(255, 0, 0), font=fontobj,\n anchor=None) # 用draw对象的text()方法添加文字\nimage.save(outputPath + outFile) #", "sub_path": "Addnumber.py", "file_name": "Addnumber.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "imp.reload", "line_number": 6, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "212785868", "text": "#from maths import *\r\nimport sklearn\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport numpy\r\n\r\n\r\ndef averaged_representative_classification(train_dict_vec, test_vectors, sim_measure):\r\n average_dict_vec = {}\r\n for i in train_dict_vec: # label i in dictionary ######### print(train_dict_vec[i])\r\n #average = average_vector(train_dict_vec[i])\r\n average_dict_vec[i] = numpy.mean(train_dict_vec[i])\r\n test_dict_vec = {}\r\n predictions = []\r\n for test_vec in test_vectors:\r\n sim = [0, -1]\r\n for label in train_dict_vec:\r\n label_vector = train_dict_vec[label]\r\n if sim_measure == \"cosine\":\r\n temp_sim = sklearn.metrics.pairwise.cosine_similarity(label_vector, test_vec)\r\n else:\r\n print(\"ERROR: Undefined measure\")\r\n return None\r\n if temp_sim > sim[1]:\r\n sim = [label, temp_sim]\r\n if sim[0] in test_dict_vec:\r\n test_dict_vec[sim[0]].append(test_vec)\r\n else:\r\n test_dict_vec[sim[0]] = [test_vec]\r\n predictions.append(sim)\r\n return test_dict_vec, predictions\r\n\r\n", "sub_path": "master_thesis/classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "numpy.mean", "line_number": 
11, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "21037002", "text": "# Write a Python file that uploads an image to your \r\n# Twitter account. Make sure to use the \r\n# hashtags #UMSI-206 #Proj3 in the tweet.\r\n\r\n# You will demo this live for grading.\r\n\r\nimport tweepy\r\nimport nltk\r\n\r\n# Unique code from Twitter\r\naccess_token = \"783672422-f8n5mxjJdPinhDJNpoj4EPeMmyIbf5TQqVUjEtjz\"\r\naccess_token_secret = \"VQHV0tDXRRaaYkW2nq9ZXTbabnEHlXESKJn79P9itBwnd\"\r\nconsumer_key = \"cCxQmwhltBPqkV8njvc9xaZ94\"\r\nconsumer_secret = \"TAjrbPAm2ElfJzfpDWuXGHAzzGI1kpsbGUkn8vcjzke3dw89KP\"\r\n\r\n# Boilerplate code here\r\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\r\nauth.set_access_token(access_token,access_token_secret)\r\n\r\napi = tweepy.API(auth)\r\n\r\nimage = input('Enter location of image: ')\r\nmessage = input('Enter tweet: ')\r\napi.update_with_media(image, status = message)\r\n\r\n\r\n", "sub_path": "HW3-StudentCopy/twitterhw3a.py", "file_name": "twitterhw3a.py", "file_ext": "py", "file_size_in_byte": 798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "597368247", "text": "# uncompyle6 version 3.6.7\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyams_viewlet/provider.py\n# Compiled at: 2020-02-18 20:07:12\n# Size of source mod 2**32: 4652 bytes\n__doc__ = 'PyAMS_viewlet.provider module\\n\\nThis module provides the \"provider:\" TALES expression, which allows inclusion of any registered\\ncontent provider into a Chameleon or ZPT template.\\n'\nimport re\nfrom chameleon.astutil import Symbol\nfrom chameleon.tales import StringExpr\nfrom zope.contentprovider.interfaces import BeforeUpdateEvent, ContentProviderLookupError, IContentProvider\nfrom zope.contentprovider.tales import addTALNamespaceData\nfrom zope.location.interfaces import ILocation\nfrom pyams_utils.tales import ContextExprMixin\n__docformat__ = 'restructuredtext'\nFUNCTION_EXPRESSION = re.compile('(.+)\\\\((.+)\\\\)', re.MULTILINE | re.DOTALL)\nARGUMENTS_EXPRESSION = re.compile('[^(,)]+')\nCONTENT_PROVIDER_NAME = re.compile('([A-Za-z0-9_\\\\-\\\\.]+)')\n\ndef render_content_provider(econtext, name):\n \"\"\"TALES provider: content provider\n\n This TALES expression is used to render a registered \"content provider\", which\n is an adapter providing IContentProvider interface; adapter lookup is based on\n current context, request and view.\n\n The requested provider can be called with our without arguments, like in\n ${structure:provider:my_provider} or ${structure:provider:my_provider(arg1, arg2)}.\n In the second form, arguments will be passed to the \"update\" method; arguments can be\n static (like strings or integers), or can be variables defined into current template\n context; other Python expressions including computations or functions calls are actually\n not supported, but dotted syntax is supported to access inner attributes of variables.\n\n Provider arguments can be passed by position but can also be passed by name, using classic\n syntax as in 
${structure:provider:my_provider(arg1, arg3=var3)}\n \"\"\"\n\n def get_value(econtext, arg):\n \"\"\"Extract argument value from context\n\n Extension expression language is quite simple. Values can be given as\n positioned strings, integers or named arguments of the same types.\n \"\"\"\n arg = arg.strip()\n if arg.startswith('\"') or arg.startswith(\"'\"):\n return arg[1:-1]\n if '=' in arg:\n key, value = arg.split('=', 1)\n value = get_value(econtext, value)\n return {key.strip(): value}\n try:\n arg = int(arg)\n except ValueError:\n args = arg.split('.')\n result = econtext.get(args.pop(0))\n for arg in args:\n result = getattr(result, arg)\n\n return result\n else:\n return arg\n\n name = name.strip()\n context = econtext.get('context')\n request = econtext.get('request')\n view = econtext.get('view')\n args, kwargs = [], {}\n func_match = FUNCTION_EXPRESSION.match(name)\n if func_match:\n name, arguments = func_match.groups()\n for arg in map(lambda x: get_value(econtext, x), ARGUMENTS_EXPRESSION.findall(arguments)):\n if isinstance(arg, dict):\n kwargs.update(arg)\n else:\n args.append(arg)\n\n else:\n match = CONTENT_PROVIDER_NAME.match(name)\n if match:\n name = match.groups()[0]\n else:\n raise ContentProviderLookupError(name)\n registry = request.registry\n provider = registry.queryMultiAdapter((context, request, view), IContentProvider, name=name)\n if provider is None:\n raise ContentProviderLookupError(name)\n if ILocation.providedBy(provider):\n provider.__name__ = name\n addTALNamespaceData(provider, econtext)\n registry.notify(BeforeUpdateEvent(provider, request))\n provider.update(*args, **kwargs)\n return provider.render()\n\n\nclass ProviderExpr(ContextExprMixin, StringExpr):\n \"\"\"ProviderExpr\"\"\"\n transform = Symbol(render_content_provider)", "sub_path": "pycfiles/pyamtrack-0.1.4-py3-none-manylinux1_x86_64/provider.cpython-35.py", "file_name": "provider.cpython-35.py", "file_ext": "py", "file_size_in_byte": 4102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "zope.contentprovider.interfaces.ContentProviderLookupError", "line_number": 82, "usage_type": "call"}, {"api_name": "zope.contentprovider.interfaces.IContentProvider", "line_number": 84, "usage_type": "argument"}, {"api_name": "zope.contentprovider.interfaces.ContentProviderLookupError", "line_number": 86, "usage_type": "call"}, {"api_name": "zope.location.interfaces.ILocation.providedBy", "line_number": 87, "usage_type": "call"}, {"api_name": "zope.location.interfaces.ILocation", "line_number": 87, "usage_type": "name"}, {"api_name": "zope.contentprovider.tales.addTALNamespaceData", "line_number": 89, "usage_type": "call"}, {"api_name": "zope.contentprovider.interfaces.BeforeUpdateEvent", "line_number": 90, "usage_type": "call"}, {"api_name": "pyams_utils.tales.ContextExprMixin", "line_number": 95, "usage_type": "name"}, {"api_name": "chameleon.tales.StringExpr", "line_number": 95, "usage_type": "name"}, {"api_name": "chameleon.astutil.Symbol", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "646769660", "text": "import numpy as np\nfrom scipy import integrate\nimport 
matplotlib.pyplot as plt\n\ntime=np.linspace(0,15, 1024)\n\ndef derivN(N, t):\n    \"\"\"Return the derivative of the vector N, which represents\n    the tuple (N1, N2). \"\"\"\n\n    N1, N2 = N\n    return np.array([N1*(1 - N1 - .7*N2), N2*(1 - N2 - .3*N1)])\n\ndef coupled(time, init, ax):\n    \"\"\"Visualize the system of coupled equations by passing a time range and\n    initial conditions for the coupled equations.\n\n    The initial condition is the value that (N1, N2) will assume at the first\n    timestep. \"\"\"\n\n    N = integrate.odeint(derivN, init, time)\n    ax[0].plot(N[:,0], N[:,1], label='[{:.1f}, {:.1f}]'.format(*init))\n    # plots N2 vs N1, with time as an implicit parameter\n    l1, = ax[1].plot(time, N[:,0], label='[{:.1f}, {:.1f}]'.format(*init))\n    ax[1].plot(time, N[:,1], color=l1.get_color())", "sub_path": "pkj.py", "file_name": "pkj.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.linspace", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.integrate.odeint", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.integrate", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "380826963", "text": "# -*- coding: utf-8 -*-\n\n'''\nData manager class: the database access code, factored out\n\n'''\nimport pymysql\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO)  # ,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogging = logging.getLogger(__name__)\n\n\nclass DataManager():\n    def connect_db(self):\n        return pymysql.connect(host='127.0.0.1',\n                               port=3306,\n                               user='root',\n                               password='',\n                               database='xinhua',\n                               charset='utf8')\n\n\n    def query_e2019(self,cc2,limit=''):\n        sql_str = (\"SELECT distinct(FILE_UUID),txt\"\n                   + \" FROM e20190313\"\n                   + \" WHERE txt like '%s' group by FILE_UUID,txt %s\" % (cc2,'limit '+str(limit) if limit!='' else ''))\n        logging.info(sql_str)\n\n        con = self.connect_db()\n        cur = con.cursor()\n        cur.execute(sql_str)\n        rows = cur.fetchall()\n        cur.close()\n        con.close()\n        return rows\n\n    def query_sql(self, sql_str):\n        logging.info(sql_str)\n        con = self.connect_db()\n        cur = con.cursor()\n        cur.execute(sql_str)\n        rows = cur.fetchall()\n        cur.close()\n        con.close()\n        return rows\n\n# dm=DataManager()\n# print(len(dm.query_e2019('%')))", "sub_path": "dataManager.py", "file_name": "dataManager.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "631037903", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Import libraries\nimport cv2\nimport os\n\ndef mizumashi(path, deg, scale):\n    \"\"\"\n    Function that generates augmented images\n    \"\"\"\n    \n    image = cv2.imread(path)\n\n    # Prepare the image rotation\n    target = (int(image.shape[1] / 2), int(image.shape[0] / 2))\n    matrix = cv2.getRotationMatrix2D(target, deg, scale)\n\n    # Transform the image and return it\n    return cv2.warpAffine(image, matrix, (image.shape[1], image.shape[0]))\n\n# Name of the folder where augmented images are saved\ndirname = \"./mizumashi_images\"\n\n# 
Create the folder for augmented images if it does not exist\nif not os.path.isdir(dirname):\n    os.mkdir(dirname)\n\n# The image to augment\nimage_path = \"./original.jpg\"\n\n# Generate augmented images (parameters: angle, scale)\nprint(\"Starting augmented-image generation\")\nimage1 = mizumashi(image_path, 45, 1.0)\nimage2 = mizumashi(image_path, 90, 0.7)\nimage3 = mizumashi(image_path, -45, 1.0)\nimage4 = mizumashi(image_path, 0, 1.5)\n\n# Save the augmented images\nprint(\"Saving augmented images (saved to {})\".format(dirname))\ncv2.imwrite(dirname + \"/mizumashi1.jpg\", image1)\ncv2.imwrite(dirname + \"/mizumashi2.jpg\", image2)\ncv2.imwrite(dirname + \"/mizumashi3.jpg\", image3)\ncv2.imwrite(dirname + \"/mizumashi4.jpg\", image4)\n", "sub_path": "mizumashi/mizumashi.py", "file_name": "mizumashi.py", "file_ext": "py", "file_size_in_byte": 1304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "106770385", "text": "# Decision Tree - classification, regression\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pydotplus\r\nfrom matplotlib.colors import ListedColormap\r\nfrom sklearn import datasets\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\r\n\r\n\r\ndef plot_decision_boundary(clf, X, y, axes=[0, 7.5, 0, 3]):\r\n    # Scatter plot of the data: x-axis (petal length), y-axis (petal width)\r\n    plt.scatter(X[y == 0, 0], X[y == 0, 1], c='r')\r\n    plt.scatter(X[y == 1, 0], X[y == 1, 1], c='b')\r\n    plt.scatter(X[y == 2, 0], X[y == 2, 1], c='g')\r\n    # decision boundary\r\n    x_pts = np.linspace(axes[0], axes[1], 100)\r\n    y_pts = np.linspace(axes[2], axes[3], 100)\r\n    X_pts, Y_pts = np.meshgrid(x_pts, y_pts)\r\n    X_new = np.c_[X_pts.ravel(), Y_pts.ravel()]  # data used for prediction\r\n    predicts = clf.predict(X_new).reshape(X_pts.shape)  # predictions from the ML model\r\n    cust_cmap = ListedColormap(['pink', 'aqua', 'greenyellow'])\r\n    plt.contourf(X_pts, Y_pts, predicts, cmap=cust_cmap, alpha=0.3)\r\n\r\n\r\nif __name__ == '__main__':\r\n    # Prepare the data\r\n    iris = datasets.load_iris()\r\n    X = iris.data[:, (2, 3)]  # select the petal length/width features\r\n    y = iris.target\r\n\r\n    # Select and create the model\r\n    tree_clf = DecisionTreeClassifier(random_state=1)\r\n    # Fit the model to the training data (training)\r\n    tree_clf.fit(X, y)\r\n    # Evaluate\r\n    tree_score = tree_clf.score(X, y)  # accuracy\r\n    print('tree score:', tree_score)\r\n\r\n    y_pred = tree_clf.predict(X)  # predictions\r\n    conf_mat = confusion_matrix(y, y_pred)\r\n    print(conf_mat)\r\n    report = classification_report(y, y_pred,\r\n                                   target_names=iris.target_names)\r\n    print(report)\r\n\r\n    # Packages needed to turn the decision tree into a graph and save it\r\n    # graphviz - turns the decision tree into a graph object.\r\n    # pydotplus - saves the object created by graphviz to a file/image.\r\n    # Install method 1)\r\n    #   conda install <package name>\r\n    # Install method 2)\r\n    #   PyCharm -> File -> Settings -> Project -> Project Interpreter\r\n    #   -> + button -> search for the package name and install it\r\n    # After installing the packages, add the 
folder where graphviz is installed to the PATH environment variable\r\n    # Restart PyCharm after changing the environment settings\r\n\r\n    export_graphviz(tree_clf,\r\n                    out_file='iris.dot',\r\n                    feature_names=iris.feature_names[2:],\r\n                    class_names=iris.target_names,\r\n                    filled=True)\r\n    graph = pydotplus.graph_from_dot_file('iris.dot')\r\n    graph.write_png('iris.png')\r\n\r\n    tree_clf2 = DecisionTreeClassifier(random_state=1, max_depth=2)\r\n    tree_clf2.fit(X, y)\r\n    print('tree_clf2 score:', tree_clf2.score(X, y))\r\n    export_graphviz(tree_clf2,\r\n                    out_file='iris_depth2.dot',\r\n                    feature_names=iris.feature_names[2:],\r\n                    class_names=iris.target_names,\r\n                    filled=True)\r\n    g = pydotplus.graph_from_dot_file('iris_depth2.dot')\r\n    g.write_png('iris_depth2.png')\r\n\r\n    print('g0 =', 1 - (1/3)**2 * 3)\r\n\r\n    tree_clf3 = DecisionTreeClassifier(random_state=42,\r\n                                       max_depth=2)\r\n    tree_clf3.fit(X, y)\r\n    print('tree_clf3 score:', tree_clf3.score(X, y))\r\n    export_graphviz(tree_clf3,\r\n                    out_file='iris_depth2_42.dot',\r\n                    feature_names=iris.feature_names[2:],\r\n                    class_names=iris.target_names,\r\n                    filled=True)\r\n    g = pydotplus.graph_from_dot_file('iris_depth2_42.dot')\r\n    g.write_png('iris_depth2_42.png')\r\n\r\n    plot_decision_boundary(tree_clf2, X, y)\r\n    plt.show()\r\n\r\n    plot_decision_boundary(tree_clf3, X, y)\r\n    plt.show()\r\n\r\n    plot_decision_boundary(tree_clf, X, y)\r\n    plt.show()\r\n\r\n", "sub_path": "lab-ml/ch06/ex01_tree.py", "file_name": "ex01_tree.py", "file_ext": "py", "file_size_in_byte": 3967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "matplotlib.pyplot.scatter", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 29, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 59, "usage_type": "call"}, {"api_name": "pydotplus.graph_from_dot_file", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 70, "usage_type": "call"}, {"api_name": "pydotplus.graph_from_dot_file", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 80, 
"usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 84, "usage_type": "call"}, {"api_name": "pydotplus.graph_from_dot_file", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "241414401", "text": "import os\nfrom datetime import datetime, timedelta\n\nfrom dagster_cron import SystemCronScheduler\n\nfrom dagster import daily_schedule, schedules\n\nRESOURCE_CREDENTIALS = ['DARK_SKY_API_KEY', 'GOOGLE_APPLICATION_CREDENTIALS']\n\n\n@daily_schedule(\n pipeline_name='extract_daily_weather_data_pipeline',\n start_date=datetime(year=2019, month=1, day=1),\n execution_time=(datetime.now() + timedelta(minutes=1)).time(),\n mode='development',\n environment_vars={cred_name: os.environ.get(cred_name) for cred_name in RESOURCE_CREDENTIALS},\n)\ndef daily_ingest_schedule(date):\n unix_seconds_since_epoch = int((date - datetime(year=1970, month=1, day=1)).total_seconds())\n return {\n \"resources\": {\n \"credentials_vault\": {\"config\": {\"environment_variable_names\": [\"DARK_SKY_API_KEY\"]}},\n \"postgres_db\": {\n \"config\": {\n \"postgres_db_name\": \"test\",\n \"postgres_hostname\": \"localhost\",\n \"postgres_password\": \"test\",\n \"postgres_username\": \"test\",\n }\n },\n },\n \"solids\": {\n \"download_weather_report_from_weather_api\": {\n \"inputs\": {\"epoch_date\": {\"value\": unix_seconds_since_epoch}}\n },\n \"insert_weather_report_into_table\": {\n \"config\": {\"index_label\": \"uuid\"},\n \"inputs\": {\"table_name\": {\"value\": \"weather\"}},\n },\n },\n }\n\n\n@schedules(scheduler=SystemCronScheduler)\ndef define_scheduler():\n return [daily_ingest_schedule]\n", "sub_path": "examples/dagster_examples/bay_bikes/schedules.py", "file_name": "schedules.py", "file_ext": "py", "file_size_in_byte": 1580, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "datetime.datetime", "line_number": 19, "usage_type": "call"}, {"api_name": "dagster.daily_schedule", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "dagster.schedules", "line_number": 44, "usage_type": "call"}, {"api_name": "dagster_cron.SystemCronScheduler", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "561076646", "text": "import subprocess\nimport os\nimport time\nimport requests\nimport sys\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../'))\nfrom common.constants import WORDS_BASE_PATH, REQUIREMENTS_BASE_PATH, SRC_BASE_PATH, PROJECT_BASE_PATH, \\\n TEST_ARTICLES_BASE_PATH, WORKERS_COUNT\n\nfrom tests.expected_results import QUERY_RESULTS\n\nSLEEP_BEFORE_REQUESTS = 25\n\n\ndef 
main():\n subprocess.run(['python3', '-m', 'pip', 'install', '-r', REQUIREMENTS_BASE_PATH])\n for filename in os.listdir(WORDS_BASE_PATH):\n file_path = os.path.join(WORDS_BASE_PATH, filename)\n\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n subprocess.run(['redis-cli', 'FLUSHALL'])\n\n childprocs = []\n\n try:\n my_env = os.environ.copy()\n my_env[\"ARTICLES_DIR\"] = TEST_ARTICLES_BASE_PATH\n my_env[\"WORDS_DIR\"] = WORDS_BASE_PATH\n my_env[\"FLASK_ENV\"] = 'development'\n childprocs.append(subprocess.Popen(['python3', 'server.py'], cwd=SRC_BASE_PATH, env=my_env))\n\n childprocs.append(subprocess.run(['docker', 'build', '-t', 'test_image', '.'], cwd=PROJECT_BASE_PATH))\n subprocess.Popen(['docker-compose', 'up', '--scale', 'worker={}'.format(WORKERS_COUNT), '--scale',\n 'writer={}'.format(WORKERS_COUNT)], cwd='./', env=my_env)\n\n time.sleep(SLEEP_BEFORE_REQUESTS)\n\n if check_request():\n print('Success!!\\nAll tests passed.')\n else:\n print('Failed')\n\n finally:\n for p in childprocs:\n if isinstance(p, subprocess.Popen):\n p.terminate()\n\n\ndef check_request():\n for query in QUERY_RESULTS:\n res = requests.get('http://localhost:5000/search?{}'.format(query))\n\n if res.status_code != 200:\n print('Bad status code: ', res.status_code)\n return False\n\n result_articles = res.content[36:-23].split(b'\\n')\n\n if len(result_articles) != len(QUERY_RESULTS[query]):\n return False\n\n for result_article in result_articles:\n if result_article not in QUERY_RESULTS[query]:\n return False\n\n return True\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tests/main_test.py", "file_name": "main_test.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 17, "usage_type": "call"}, {"api_name": "common.constants.REQUIREMENTS_BASE_PATH", "line_number": 17, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "common.constants.WORDS_BASE_PATH", "line_number": 18, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "common.constants.WORDS_BASE_PATH", "line_number": 19, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 23, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "common.constants.TEST_ARTICLES_BASE_PATH", "line_number": 33, "usage_type": "name"}, {"api_name": "common.constants.WORDS_BASE_PATH", "line_number": 34, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 36, "usage_type": "call"}, {"api_name": 
"common.constants.SRC_BASE_PATH", "line_number": 36, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 38, "usage_type": "call"}, {"api_name": "common.constants.PROJECT_BASE_PATH", "line_number": 38, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 39, "usage_type": "call"}, {"api_name": "common.constants.WORKERS_COUNT", "line_number": 39, "usage_type": "argument"}, {"api_name": "common.constants.WORKERS_COUNT", "line_number": 40, "usage_type": "argument"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tests.expected_results.QUERY_RESULTS", "line_number": 56, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "tests.expected_results.QUERY_RESULTS", "line_number": 65, "usage_type": "name"}, {"api_name": "tests.expected_results.QUERY_RESULTS", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "60439869", "text": "# Import pandas, matplotlib and numpy\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n# import sys to read CMD input\nimport sys\n\ndef plotPDFs(Q2list):\n \"\"\"\n Given a list of Q2 values, it plots and saves the\n plots that compare the PDFs given by CT18 and NNPDF\n with the ones predicted by IHQCD\n \"\"\"\n gluonPDFIHQCD = pd.read_csv(\"xg_pred_Q2max_10.txt\", sep = '\\t')\n for Q2 in Q2list:\n # Get data and the errors from NNPDF\n gluonPDFDataNNPDF = pd.read_csv(\"NNPDF31_nnlo_as_0118_nf_4_Q2_\"+str(Q2)+\".txt\", sep = '\\t')\n # Get data and the errors from CT18NNLO\n gluonPDFDataCT18 = pd.read_csv(\"CT18NNLO_Q2_\"+str(Q2)+\".txt\", sep = '\\t')\n # Get IHQCD prediction for the gluon pdfs\n gluonPDFIHQCDQ2 = gluonPDFIHQCD[gluonPDFIHQCD.Q2 == Q2]\n plt.figure()\n # Plot the data from NNPDF\n plt.plot(gluonPDFDataNNPDF.x, gluonPDFDataNNPDF.xg, \"b-\", label = r'NNPDF $Q^2 = $'+str(Q2))\n # Plot the data from CT18NNLO\n plt.plot(gluonPDFDataCT18.x, gluonPDFDataCT18.xg, \"r-\", label = r'CT18 $Q^2 = $'+str(Q2))\n # Plt the data from IHQCD\n plt.plot(gluonPDFIHQCDQ2.x, gluonPDFIHQCDQ2.xg, \"k-\", label = r'IHQCD $Q^2 = $'+str(Q2))\n plt.xlabel(\"x\")\n plt.xscale(\"log\")\n plt.xlim(10**(-6),10**(-2))\n plt.ylabel(r'$xg(x,Q^2)$')\n plt.fill_between(gluonPDFDataNNPDF.x, gluonPDFDataNNPDF.xg - gluonPDFDataNNPDF[\"error-\"],\n gluonPDFDataNNPDF.xg + gluonPDFDataNNPDF[\"error+\"], facecolor='blue', alpha = 0.2)\n plt.fill_between(gluonPDFDataCT18.x, gluonPDFDataCT18.xg - gluonPDFDataCT18[\"error-\"],\n gluonPDFDataCT18.xg + gluonPDFDataCT18[\"error+\"], facecolor='red', alpha = 0.2)\n plt.legend()\n plt.savefig(\"gluon_PDF_Q2_\"+str(Q2)+ \"_Q2max_10.pdf\")\n plt.show()\n\n# Read input from command line\nargument_vector = sys.argv\nif(len(argument_vector) == 1):\n print(\"Usage: \" + argument_vector[0] + \" Q2 vals.\")\nelse:\n # The first value of argument_vector is the program name so we exclude it\n Q2vals = list(map(int,argument_vector[1:]))\n # Do the plots now\n plotPDFs(Q2vals)", "sub_path": "plots/Gluon_PDFs/plotPDFs_Q2max_10.py", "file_name": "plotPDFs_Q2max_10.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "478962872", "text": "from django.core.management import BaseCommand\n\nfrom currency.helper import RateManager, CurrencyManager\n\n\nclass Command(BaseCommand):\n help = 'get data from curl https://api-pub.bitfinex.com/v2/candles/trade:1m:tBTCUSD/last'\n\n def handle(self, *args, **kwargs):\n cm = CurrencyManager()\n cm.get_by_mask()\n\n rm = RateManager()\n rm.update_rate(time=\"1M\")\n rm.clear_history(days=20)\n", "sub_path": "app/currency/management/commands/candles.py", "file_name": "candles.py", "file_ext": "py", "file_size_in_byte": 421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "django.core.management.BaseCommand", "line_number": 6, "usage_type": "name"}, {"api_name": "currency.helper.CurrencyManager", "line_number": 10, "usage_type": "call"}, {"api_name": "currency.helper.RateManager", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "366060780", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nfrom os.path import join\n\n\ndef fetch(img_dir, name):\n if name[-1] == 'F':\n name = name[:-1]\n\n img = cv2.imread(join(img_dir, name))\n if img.shape == 2:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n elif img.shape == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n 
return img\n\n\ndef resize(img, size=(1024, 768)):\n    assert len(size) == 2\n    return cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)\n\n\ndef pad(img, size=(1024, 768)):\n    assert len(img.shape) == 3\n    assert len(size) == 2\n    h, w, _ = img.shape\n    assert w <= size[0] and h <= size[1]\n    pad_vert = np.ceil((size[1]-h) / 2).astype(np.uint32)\n    pad_hor = np.ceil((size[0]-w) / 2).astype(np.uint32)\n\n    padded = np.zeros((size[1], size[0], 3)).astype(np.uint8)\n    padded[pad_vert:pad_vert+h, pad_hor:pad_hor+w, :] = img.copy()\n    return padded\n\n\n\n\n\n\n\n", "sub_path": "utils/preprocessing.py", "file_name": "preprocessing.py", "file_ext": "py", "file_size_in_byte": 1004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "407499405", "text": "import requests\nimport mysql.connector\nimport json\n\nr=requests.get('https://fr.openfoodfacts.org/cgi/search.pl?action=process&tagtype_0=categories&tag_contains_0=contains&tag_0=biscuits&page_size=500&sort_by=unique_scans_n&countries=France&json=1')\ndata = r.json()\n#print(data)\nproducts = data['products']\n\n\n\nbdd = mysql.connector.connect(host=\"192.168.1.77\",user=\"student\",password=\"wired\", database=\"pur_beurre\")\ncursor = bdd.cursor()\ni=0\n\n\n\n\n\nfor item in products:\n    # ('a' or 'b') evaluates to 'a', so the original test only checked one key;\n    # check every required key explicitly and skip incomplete products instead\n    # of deleting from the list while iterating over it.\n    if all(key in item for key in ('nutrition_grade_fr', 'brands', 'product_name', 'url')):\n        cursor.execute(\"INSERT INTO Product (name_product, brand_product, nutritional_note, link_open_food_fact) VALUES (%s, %s, %s, %s)\",\n                       (item['product_name'], item['brands'],\n                        item['nutrition_grade_fr'], item['url']))\n        i = i + 1\n        print(i)\n\nbdd.commit()\n", "sub_path": "script_request.py", "file_name": "script_request.py", "file_ext": "py", "file_size_in_byte": 927, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 12, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "124710897", "text": "import sys\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef main():\n\n    im = cv2.imread('../images/1/pool table.jpg', 0)\n    im_med5 = cv2.medianBlur(im,5)\n    # im_med5 = cv2.GaussianBlur(im, (5,5), 
0)\n\n\n    scale = 1\n    delta = 0\n    ddepth = cv2.CV_16S\n    # Gradient-X\n    grad_x = cv2.Sobel(im_med5, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n    # grad_x = cv2.Scharr(im_med5,ddepth,1,0)\n\n    # Gradient-Y\n    grad_y = cv2.Sobel(im_med5, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)\n    # grad_y = cv2.Scharr(im_med5,ddepth,0,1)\n\n    abs_grad_x = cv2.convertScaleAbs(grad_x)  # converting back to uint8\n    abs_grad_y = cv2.convertScaleAbs(grad_y)\n\n    # dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)\n    dst = cv2.add(abs_grad_x,abs_grad_y)\n\n    dst = cv2.GaussianBlur(dst, (9,9), 0, dst)\n    p3 = plt.figure(3)\n    plt.imshow(dst)\n\n    im_can = cv2.Canny(dst, 50, 150, True)\n    minLineLength = 1\n    lines = cv2.HoughLines(im_can, 100, np.pi/180, 100)\n\n    x0=0\n    xend = im.shape[1]-1\n\n    for line in lines:\n        rho = line[0][0]\n        theta = line[0][1]\n        a = np.cos(theta)\n        b = np.sin(theta)\n\n        try:\n            y0 = np.round((-a/b) * x0 + rho/b, 0)\n            y0 = int(y0)\n            yend = np.round((-a/b)*xend + rho/b, 0)\n            yend = int(yend)\n            cv2.line(im, (x0, y0), (xend, yend),(225,0,0),2)\n        except (ValueError, OverflowError):  # b == 0 (vertical line) makes int() see nan/inf\n            pass\n\n    cv2.imshow(\"img\", im)\n    cv2.waitKey()\n    # for x1, y1, x2, y2 in lines[0]:\n    #     cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 2)\n\n    cv2.imwrite('../images/1/houghlines5.jpg', im)\n\n    # im_med5 = cv2.medianBlur(im, 9)\n    # im_can = cv2.Sobel(im)\n\n    p1 = plt.figure(1)\n    plt.imshow(dst)\n\n    p2 = plt.figure(2)\n    plt.imshow(im_can)\n    plt.show(block=True)\n\n\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "src/pool_table.py", "file_name": "pool_table.py", "file_ext": "py", "file_size_in_byte": 1926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.CV_16S", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.BORDER_DEFAULT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.BORDER_DEFAULT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.convertScaleAbs", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "cv2.Canny", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.HoughLines", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": 
"cv2.waitKey", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "68961326", "text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport hashlib\nimport hmac\nimport httplib\nimport iso8601\nimport json\nimport logging\nimport platform\n\nfrom datetime import datetime;\nfrom datetime import timedelta;\n\ntry:\n from google.appengine.api import urlfetch\nexcept ImportError:\n urlfetch = None\n\nfrom urllib import quote\n\n\nSIGNATURE_WINDOW_SIZE = 1; # minute\n\n\nclass SignatureValidationError(Exception):\n \"\"\"\n Exception thrown when a signed request is invalid.\n \"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return repr(self.msg)\n\n\ndef signed_request(params):\n \"\"\"\n Generates the signed request string from the parameters.\n\n @param params Object containing POST parameters passed during the signed request.\n\n @return Query string containing the parameters of the signed request.\n\n Note this method does not calculate a signature; it simply generates the signed request from\n the parameters including the signature.\n \"\"\"\n has_signature = False\n keys = params.keys()\n if \"signature\" in keys:\n has_signature = True\n keys.remove(\"signature\")\n keys.sort()\n if has_signature:\n keys.append(\"signature\")\n query_string = \"&\".join(quote(key, \"~\") + \"=\" + quote(params[key], \"~\") for key in keys)\n return query_string\n\n\nclass API:\n \"\"\"\n Speakap API wrapper\n\n You should instantiate the Speakap API as follows:\n\n speakap_api = Speakap.API({\n \"scheme\": \"https\",\n \"hostname\": \"api.speakap.io\",\n \"app_id\": MY_APP_ID,\n \"app_secret\": MY_APP_SECRET,\n \"api_version\": API_VERSION\n })\n\n Your Speakap API hostname depends on your network location: \n - USA: api.usa.speakap.io\n - EU: api.speakap.io\n\n Obviously, MY_APP_ID and MY_APP_SECRET should be replaced with your actual App ID and secret\n (or be constants containing those).\n\n After you have instantiated the API wrapper, you can perform API calls as follows:\n\n (json_result, error) = speakap_api.get(\"/networks/%s/user/%s/\" % (network_eid, user_eid))\n\n (json_result, error) = speakap_api.post(\"/networks/%s/messages/\" % network_eid, {\n \"body\": \"test 123\",\n \"messageType\": \"update\",\n \"recipient\": { \"type\": \"network\", \"EID\": network_eid }\n })\n\n The JSON result contains the already parsed reply in case of success, but is None in case of\n an error. 
The error variable is None in case of success, but is an object containing code\n and message properties in case of an error.\n\n WARNING: If you use this class to make requests on any other platform than Google App Engine,\n the SSL certificate of the Speakap API service is not confirmed, leaving you\n vulnerable to man-in-the-middle attacks. This is due to a limitation of the SSL\n support in the Python framework. You are strongly advised to take your own\n precautions to make sure the certificate is valid.\n \"\"\"\n def __init__(self, config):\n self.scheme = config[\"scheme\"]\n self.hostname = config[\"hostname\"]\n self.app_id = config[\"app_id\"]\n self.app_secret = config[\"app_secret\"]\n self.api_version = config[\"api_version\"] if \"api_version\" in config else \"latest\"\n\n self.access_token = \"%s_%s\" % (self.app_id, self.app_secret)\n\n def delete(self, path):\n \"\"\"\n Performs a DELETE request to the Speakap API\n\n @param path The path of the REST endpoint, including optional query parameters.\n\n @return A tuple containing the parsed JSON reply (in case of success) and an error object\n (in case of an error).\n\n Example:\n\n (json_result, error) = speakap_api.delete(\"/networks/%s/messages/%s/\" % (network_eid, message_eid))\n if json_result:\n ... do something with json_result ...\n else\n ... do something with error ...\n \"\"\"\n response = self._request(\"DELETE\", path)\n return self._handle_response(response)\n\n def get(self, path):\n \"\"\"\n Performs a GET request to the Speakap API\n\n @param path The path of the REST endpoint, including optional query parameters.\n\n @return A tuple containing the parsed JSON reply (in case of success) and an error object\n (in case of an error).\n\n Example:\n\n (json_result, error) = speakap_api.get(\"/networks/%s/timeline/?embed=messages.author\" % network_eid)\n if json_result:\n ... do something with json_result ...\n else\n ... do something with error ...\n \"\"\"\n response = self._request(\"GET\", path)\n return self._handle_response(response)\n\n def post(self, path, data):\n \"\"\"\n Performs a POST request to the Speakap API\n\n @param path The path of the REST endpoint, including optional query parameters.\n @param data Object representing the JSON object to submit.\n\n @return A tuple containing the parsed JSON reply (in case of success) and an error object\n (in case of an error).\n\n Note that if you want to make a POST request to an action (generally all REST endpoints\n without trailing slash), you should use the post_action() method instead, as this will use\n the proper formatting for the POST data.\n\n Example:\n\n (json_result, error) = speakap_api.post(\"/networks/%s/messages/\" % network_eid, {\n \"body\": \"test 123\",\n \"messageType\": \"update\",\n \"recipient\": { \"type\": \"network\", \"EID\": network_eid }\n })\n if json_result:\n ... do something with json_result ...\n else\n ... 
do something with error ...\n        \"\"\"\n        response = self._request(\"POST\", path, json.dumps(data))\n        return self._handle_response(response)\n\n    def post_action(self, path, data=None):\n        \"\"\"\n        Performs a POST request to an action endpoint in the Speakap API.\n\n        @param path The path of the REST endpoint, including optional query parameters.\n        @param data Optional object containing the form parameters to submit.\n\n        @return A tuple containing the parsed JSON reply (in case of success) and an error object\n                (in case of an error).\n\n        Example:\n\n            (json_result, error) = speakap_api.post_action(\"/networks/%s/messages/%s/markread\" % (network_eid, message_eid))\n            if json_result:\n                ... do something with json_result ...\n            else\n                ... do something with error ...\n        \"\"\"\n        response = self._request(\"POST\", path, urllib.urlencode(data) if data else None)\n        return self._handle_response(response)\n\n    def put(self, path, data):\n        \"\"\"\n        Performs a PUT request to the Speakap API.\n\n        @param path The path of the REST endpoint, including optional query parameters.\n        @param data Object representing the JSON object to submit.\n\n        @return A tuple containing the parsed JSON reply (in case of success) and an error object\n                (in case of an error).\n\n        Example:\n\n            (json_result, error) = speakap_api.put(\"/networks/%s/messages/%s/\" % (network_eid, message_eid), { \"body\": \"test 123\" })\n            if json_result:\n                ... do something with json_result ...\n            else\n                ... do something with error ...\n        \"\"\"\n        response = self._request(\"PUT\", path, json.dumps(data))\n        return self._handle_response(response)\n\n    def validate_signature(self, params):\n        \"\"\"\n        Validates the signature of a signed request.\n\n        @param params Object containing POST parameters passed during the signed request.\n\n        Raises a SignatureValidationError if the signature doesn't match or the signed request is\n        expired.\n        \"\"\"\n        if \"signature\" not in params:\n            raise SignatureValidationError(\"Parameters did not include a signature\")\n\n        signature = params[\"signature\"]\n\n        keys = params.keys()\n        keys.sort()\n        query_string = \"&\".join(urllib.quote(key, \"~\") + \"=\" + urllib.quote(params[key], \"~\") \\\n                                for key in keys if key != \"signature\")\n        computed_hash = base64.b64encode(hmac.new(self.app_secret, query_string, hashlib.sha256)\n                                         .digest())\n\n        if computed_hash != signature:\n            raise SignatureValidationError(\"Invalid signature: \" + query_string)\n\n        issued_at = iso8601.parse_date(params[\"issuedAt\"])\n        expires_at = issued_at + timedelta(minutes=SIGNATURE_WINDOW_SIZE)\n        if datetime.utcnow() > expires_at.replace(tzinfo=None):\n            raise SignatureValidationError(\"Expired signature\")\n\n    def _request(self, method, path, data=None):\n        headers = {\n            \"Authorization\": \"Bearer \" + self.access_token,\n            \"User-Agent\": \"Speakap-SDK python/1.0.1 (%s %s) Python/%s\" % (\n                platform.system(),\n                platform.release(),\n                platform.python_version(),\n            )\n        }\n        if self.api_version != \"latest\":\n            headers[\"Accept\"] = \"application/vnd.speakap.api-v%s+json\" % self.api_version\n        if urlfetch:\n            response = urlfetch.fetch(self.scheme + \"://\" + self.hostname + path,\n                                      headers=headers,\n                                      method=method,\n                                      payload=data,\n                                      validate_certificate=True)\n            status = response.status_code\n            data = response.content\n        else:\n            if self.scheme == \"https\":\n                connection = httplib.HTTPSConnection(self.hostname)\n            else:\n                connection = httplib.HTTPConnection(self.hostname)\n            connection.request(method, path, data, headers)\n            response = connection.getresponse()\n            status = response.status\n            data = 
response.read()\n connection.close()\n\n return (status, data)\n\n def _handle_response(self, response):\n (status, data) = response\n\n try:\n json_result = json.loads(data)\n except:\n status = 400\n json_result = { \"code\": -1001, \"message\": \"Unexpected Reply\" }\n\n if status >= 200 and status < 300:\n return (json_result, None)\n else:\n return (None, { \"code\": json_result[\"code\"], \"message\": json_result[\"message\"] })\n", "sub_path": "python/speakap.py", "file_name": "speakap.py", "file_ext": "py", "file_size_in_byte": 10590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "google.appengine.api.urlfetch", "line_number": 18, "usage_type": "name"}, {"api_name": "urllib.quote", "line_number": 56, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 176, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 197, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 218, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 237, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 239, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 239, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 239, "usage_type": "attribute"}, {"api_name": "iso8601.parse_date", "line_number": 245, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 246, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 247, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 254, "usage_type": "call"}, {"api_name": "platform.release", "line_number": 255, "usage_type": "call"}, {"api_name": "platform.python_version", "line_number": 256, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 261, "usage_type": "name"}, {"api_name": "google.appengine.api.urlfetch.fetch", "line_number": 262, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 262, "usage_type": "name"}, {"api_name": "httplib.HTTPSConnection", "line_number": 271, "usage_type": "call"}, {"api_name": "httplib.HTTPConnection", "line_number": 273, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 286, "usage_type": "call"}]} +{"seq_id": "11681355", "text": "# This file contains *modified* copies of source code samples provided by Google for its APIs.\n# These code samples are licensed under the Apache 2.0 License:\n#\n# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport httplib2\nfrom oauth2client import client\nfrom oauth2client import file\nfrom oauth2client import tools\nfrom googleapiclient.discovery import build\n\n\ndef get_service(api_name, api_version, scope, client_secrets_path):\n \"\"\"Get a service that communicates to a Google API.\n\n Args:\n api_name: string The name of the api to connect to.\n api_version: string The api version to connect to.\n scope: A list of strings representing the auth scopes to authorize for the\n connection.\n client_secrets_path: string A path to a valid client secrets file.\n\n Returns:\n A service that is connected to the specified API.\n \"\"\"\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n client_secrets_path, scope=scope,\n message=tools.message_if_missing(client_secrets_path))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n service = build(api_name, api_version, http=http)\n\n return service # type: googleapiclient.discovery.Resource\n", "sub_path": "ga_autosetup/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 40, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 41, "usage_type": "attribute"}, {"api_name": "oauth2client.tools.argparser", "line_number": 42, "usage_type": "attribute"}, {"api_name": "oauth2client.tools", "line_number": 42, "usage_type": "name"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 46, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 46, "usage_type": "name"}, {"api_name": "oauth2client.tools.message_if_missing", "line_number": 48, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 48, "usage_type": "name"}, {"api_name": "oauth2client.file.Storage", "line_number": 54, "usage_type": "call"}, {"api_name": "oauth2client.file", "line_number": 54, "usage_type": "name"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 57, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 57, "usage_type": "name"}, {"api_name": "httplib2.Http", "line_number": 
58, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.build", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "297977986", "text": "import asyncio\nimport socket\nimport sys\n\nfrom .account import AccountFactory\nfrom .application import Application\nfrom .competitor import CompetitorManager\nfrom .controller import Controller\nfrom .execution import ExecutionServer\nfrom .heads_up import HeadsUpDisplayServer\nfrom .information import InformationPublisher\nfrom .limiter import FrequencyLimiterFactory\nfrom .market_events import MarketEventsReader\nfrom .match_events import MatchEvents, MatchEventsWriter\nfrom .order_book import OrderBook\nfrom .score_board import ScoreBoardWriter\nfrom .timer import Timer\nfrom .types import Instrument\n\n\n# From Python 3.8, the proactor event loop is used by default on Windows\nif sys.platform == \"win32\" and hasattr(asyncio, \"WindowsSelectorEventLoopPolicy\"):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\ndef __validate_hostname(config, section, key):\n try:\n config[section][key] = socket.gethostbyname(config[section][key])\n except socket.error:\n raise Exception(\"Could not validate hostname in %s.%s configuration\" % (section, key))\n\n\ndef __validate_object(config, section, required_keys, value_types):\n obj = config[section]\n if type(obj) is not dict:\n raise Exception(\"%s configuration should be a JSON object\" % section)\n if any(k not in obj for k in required_keys):\n raise Exception(\"A required key is missing from the %s configuration\" % section)\n if any(type(obj[k]) is not t for k, t in zip(required_keys, value_types)):\n raise Exception(\"Element of inappropriate type in %s configuration\" % section)\n\n\ndef __exchange_config_validator(config):\n \"\"\"Return True if the specified config is valid, otherwise raise an exception.\"\"\"\n if type(config) is not dict:\n raise Exception(\"Configuration file contents should be a JSON object\")\n if any(k not in config for k in (\"Engine\", \"Execution\", \"Fees\", \"Information\", \"Instrument\", \"Limits\", \"Traders\")):\n raise Exception(\"A required key is missing from the configuration\")\n\n __validate_object(config, \"Engine\", (\"MarketDataFile\", \"MarketOpenDelay\", \"MatchEventsFile\", \"ScoreBoardFile\",\n \"Speed\", \"TickInterval\"), (str, float, str, str, float, float))\n __validate_object(config, \"Execution\", (\"Host\", \"Port\"), (str, int))\n __validate_object(config, \"Fees\", (\"Maker\", \"Taker\"), (float, float))\n if \"Hud\" in config: __validate_object(config, \"Hud\", (\"Host\", \"Port\"), (str, int)) # the Hud section is optional (see setup)\n __validate_object(config, \"Information\", (\"MulticastAddress\", \"Interface\", \"Port\"), (str, str, int))\n __validate_object(config, \"Instrument\", (\"EtfClamp\", \"TickSize\",), (float, float))\n __validate_object(config, \"Limits\", (\"ActiveOrderCountLimit\", \"ActiveVolumeLimit\", \"MessageFrequencyInterval\",\n \"MessageFrequencyLimit\", \"PositionLimit\"), (int, int, float, int, int))\n\n __validate_hostname(config, \"Execution\", \"Host\")\n if \"Hud\" in config: __validate_hostname(config, \"Hud\", \"Host\")\n __validate_hostname(config, \"Information\", \"MulticastAddress\")\n __validate_hostname(config, \"Information\", \"Interface\")\n\n if type(config[\"Traders\"]) is not dict:\n raise Exception(\"Traders configuration should be a JSON object\")\n if any(type(k) is not str for k in config[\"Traders\"]):\n raise Exception(\"Key of inappropriate type in Traders configuration\")\n if any(type(v) is not str for v in 
config[\"Traders\"].values()):\n raise Exception(\"Element of inappropriate type in Traders configuration\")\n\n return True\n\n\ndef setup(app: Application):\n \"\"\"Setup the exchange simulator.\"\"\"\n engine = app.config[\"Engine\"]\n exec_ = app.config[\"Execution\"]\n info = app.config[\"Information\"]\n instrument = app.config[\"Instrument\"]\n limits = app.config[\"Limits\"]\n\n future_book = OrderBook(Instrument.FUTURE, 0.0, 0.0)\n etf_book = OrderBook(Instrument.ETF, app.config[\"Fees\"][\"Maker\"], app.config[\"Fees\"][\"Taker\"])\n\n match_events = MatchEvents()\n match_events_writer = MatchEventsWriter(match_events, engine[\"MatchEventsFile\"], app.event_loop)\n market_events_reader = MarketEventsReader(engine[\"MarketDataFile\"], app.event_loop, future_book, etf_book,\n match_events)\n score_board_writer = ScoreBoardWriter(engine[\"ScoreBoardFile\"], app.event_loop)\n\n timer = Timer(app.event_loop, engine[\"TickInterval\"], engine[\"Speed\"], market_events_reader)\n account_factory = AccountFactory(instrument[\"EtfClamp\"], instrument[\"TickSize\"])\n competitor_manager = CompetitorManager(app.config[\"Limits\"], app.config[\"Traders\"], account_factory, etf_book,\n future_book, match_events, score_board_writer,\n instrument[\"TickSize\"], timer)\n\n limiter_factory = FrequencyLimiterFactory(limits[\"MessageFrequencyInterval\"] / engine[\"Speed\"],\n limits[\"MessageFrequencyLimit\"])\n exec_server = ExecutionServer(app.event_loop, exec_[\"Host\"], exec_[\"Port\"], competitor_manager, limiter_factory,\n timer)\n info_publisher = InformationPublisher(app.event_loop, info[\"MulticastAddress\"], info[\"Port\"], info[\"Interface\"],\n (future_book, etf_book), timer)\n\n ctrl = Controller(app.event_loop, engine[\"MarketOpenDelay\"], exec_server, info_publisher,\n market_events_reader, match_events_writer, score_board_writer, timer)\n\n if \"Hud\" in app.config:\n hud_server = HeadsUpDisplayServer(app.event_loop, app.config[\"Hud\"][\"Host\"], app.config[\"Hud\"][\"Port\"],\n match_events, competitor_manager, timer)\n ctrl.set_heads_up_display_server(hud_server)\n\n app.event_loop.create_task(ctrl.start())\n\n\ndef main():\n app = Application(\"exchange\", __exchange_config_validator)\n setup(app)\n app.run()\n", "sub_path": "ready_trader_one/exchange.py", "file_name": "exchange.py", "file_ext": "py", "file_size_in_byte": 5942, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.platform", "line_number": 22, "usage_type": "attribute"}, {"api_name": "asyncio.set_event_loop_policy", "line_number": 23, "usage_type": "call"}, {"api_name": "asyncio.WindowsSelectorEventLoopPolicy", "line_number": 23, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 28, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 29, "usage_type": "attribute"}, {"api_name": "application.Application", "line_number": 75, "usage_type": "name"}, {"api_name": "order_book.OrderBook", "line_number": 83, "usage_type": "call"}, {"api_name": "types.Instrument.FUTURE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "types.Instrument", "line_number": 83, "usage_type": "name"}, {"api_name": "order_book.OrderBook", "line_number": 84, "usage_type": "call"}, {"api_name": "types.Instrument.ETF", "line_number": 84, "usage_type": "attribute"}, {"api_name": "types.Instrument", "line_number": 84, "usage_type": "name"}, {"api_name": "match_events.MatchEvents", "line_number": 86, "usage_type": "call"}, 
{"api_name": "match_events.MatchEventsWriter", "line_number": 87, "usage_type": "call"}, {"api_name": "market_events.MarketEventsReader", "line_number": 88, "usage_type": "call"}, {"api_name": "score_board.ScoreBoardWriter", "line_number": 90, "usage_type": "call"}, {"api_name": "timer.Timer", "line_number": 92, "usage_type": "call"}, {"api_name": "account.AccountFactory", "line_number": 93, "usage_type": "call"}, {"api_name": "competitor.CompetitorManager", "line_number": 94, "usage_type": "call"}, {"api_name": "limiter.FrequencyLimiterFactory", "line_number": 98, "usage_type": "call"}, {"api_name": "execution.ExecutionServer", "line_number": 100, "usage_type": "call"}, {"api_name": "information.InformationPublisher", "line_number": 102, "usage_type": "call"}, {"api_name": "controller.Controller", "line_number": 105, "usage_type": "call"}, {"api_name": "heads_up.HeadsUpDisplayServer", "line_number": 109, "usage_type": "call"}, {"api_name": "application.Application", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "123091767", "text": "import MySQLdb\nimport sys\nimport datetime\n\ndef update(name):\n connection = MySQLdb.connect(\n host='localhost', user='root', passwd='', db='mydb', charset='utf8')\n cursor = connection.cursor()\n \n try:\n# cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS `iine` (\n# `id` int(11) AUTO_INCREMENT,\n# `name` varchar(128),\n# `first` varchar(128),\n# `second` varchar(128),\n# PRIMARY KEY (id)\n# )\"\"\")\n cursor.execute('select * from iine where name=%s', (name,))\n d1 = converter(cursor.fetchone()[2])\n t = datetime.datetime.now()\n h = int(t.strftime(\"%H\"))\n# h = 2 # uncomment to pin the hour while testing\n if h > 0 and d1[h]==0:\n d1[h] += d1[h-1] \n d1[h] += 1 \n cursor.execute('UPDATE `iine` SET `first`=%s WHERE `name`=%s;', (adapter(d1), name))\n connection.commit()\n\n cursor.execute('select * from iine')\n print(cursor.fetchall())\n connection.close()\n\n except MySQLdb.Error as e:\n print('MySQLdb.Error: ', e)\n\ndef adapter(list):\n ret = ';'.join([str(i) for i in list])\n return ret\n\ndef converter(str):\n ret = [int(i) for i in str.split(';')]\n return ret\n\nif __name__ == '__main__':\n update(sys.argv[1])\n# print(adapter([1,2,3]))\n# print(converter(\"1;2;3\"))\n", "sub_path": "database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "MySQLdb.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "315479229", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\n#!pip install pyyaml h5py --necessary to save models in HDF5 format\n\nos.chdir(\"archive/daySequence1/daySequence1/\")\ndata_dir = 'sorted_data' \n\n\n# *** ACTUAL MODEL STUFF ***\nbatch_size = 32\n#we might want to unify all the image sizes to the smaller size (640x960)\n\n#image params for the data we want to check\nimg_height = 640 \nimg_width = 960 \n\n#Fix these two training data sets, currently the function fails\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\n 
data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n#all 3 of the annotation tags from the csv\nclass_names = train_ds.class_names\nprint(class_names) #['stop', 'go', 'warning']\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\ntrain_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\nval_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n\nnormalization_layer = layers.experimental.preprocessing.Rescaling(1./255)\n\nnormalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\nimage_batch, labels_batch = next(iter(normalized_ds))\nfirst_image = image_batch[0]\n# Notice the pixels values are now in `[0,1]`.\nprint(np.min(first_image), np.max(first_image)) \n\nnum_classes = 3\n\nmodel = Sequential([\n layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(128, activation='relu'), \n layers.Dense(num_classes)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.summary()\n\nepochs=50 #increase epochs for higher accuracy, cost of time\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs\n)\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss=history.history['loss']\nval_loss=history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n#at this point the results of the model are available\ndata_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", \n input_shape=(img_height, \n img_width,\n 3)),\n layers.experimental.preprocessing.RandomRotation(0.1),\n layers.experimental.preprocessing.RandomZoom(0.1),\n ]\n)\n\nplt.figure(figsize=(10, 10))\nfor images, _ in train_ds.take(1):\n for i in range(9):\n augmented_images = data_augmentation(images)\n ax = plt.subplot(3, 3, i + 1)\n plt.imshow(augmented_images[0].numpy().astype(\"uint8\"))\n plt.axis(\"off\")\n\nmodel = Sequential([\n data_augmentation,\n layers.experimental.preprocessing.Rescaling(1./255),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n 
metrics=['accuracy'])\n\nmodel.summary()\n\nepochs = 15\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs = epochs\n)\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\nos.mkdir(\"traffic_model\")\nmodel.save('traffic_model/model')", "sub_path": "imageClassifier.py", "file_name": "imageClassifier.py", "file_ext": "py", "file_size_in_byte": 5225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "os.chdir", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image_dataset_from_directory", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.image_dataset_from_directory", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.Rescaling", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.Rescaling", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 62, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 63, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 64, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 65, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 66, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 67, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", 
"line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 69, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 74, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 107, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.RandomFlip", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 109, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.RandomRotation", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 113, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.RandomZoom", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 114, "usage_type": 
"attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental.preprocessing.Rescaling", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.experimental", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 128, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 129, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 130, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 131, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 132, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 133, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 134, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 135, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 136, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 137, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 138, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "535494722", "text": "import json\nimport pytest\n\nimport virtool.app\nimport virtool.users\n\n\nclass VTClient:\n\n def __init__(self, loop, test_client, db_host, test_db_name, create_user):\n self._loop = loop\n self._test_client = test_client\n self._create_user = create_user\n self._db_host = db_host\n self._test_db_name = test_db_name\n self._client = None\n\n self.server = None\n self.app = None\n self.db = None\n\n async def connect(self, authorize=False, administrator=False, groups=None, permissions=None, job_manager=False, file_manager=False,\n setup_mode=False):\n\n app = virtool.app.create_app(\n self._loop,\n self._db_host,\n self._test_db_name,\n disable_job_manager=not job_manager,\n disable_file_manager=not file_manager,\n disable_refreshing=True,\n ignore_settings=True,\n skip_db_checks=True,\n skip_setup=not setup_mode,\n no_sentry=True\n )\n\n self._client = await self._test_client(app)\n\n self._client.session.cookie_jar.update_cookies({\n \"session_id\": \"foobar\"\n })\n\n self.server = self._client.server\n self.app = self.server.app\n self.db = self.app.get(\"db\", None)\n\n if authorize:\n user_document = self._create_user(\"test\", administrator, groups, permissions)\n\n await self.db.users.insert_one(user_document)\n\n await self.db.sessions.insert_one({\n \"_id\": \"foobar\",\n \"ip\": \"127.0.0.1\",\n \"administrator\": administrator,\n \"user_agent\": \"Python/3.6 aiohttp/3.4.4\",\n \"user\": {\n \"id\": \"test\"\n },\n \"groups\": user_document[\"groups\"],\n \"permissions\": user_document[\"permissions\"]\n })\n\n return self\n\n async def get(self, url, params=None):\n return await self._client.get(url, params=params)\n\n async def post(self, url, data=None):\n payload = None\n\n if data:\n payload = json.dumps(data)\n\n return await self._client.post(url, data=payload)\n\n async def post_form(self, url, data):\n return await self._client.post(url, data=data)\n\n async def patch(self, url, data):\n return await self._client.patch(url, 
data=json.dumps(data))\n\n async def put(self, url, data):\n return await self._client.put(url, data=json.dumps(data))\n\n async def delete(self, url):\n return await self._client.delete(url)\n\n\n@pytest.fixture\ndef spawn_client(loop, request, test_client, test_motor, test_db_name, create_user):\n db_host = request.config.getoption(\"db_host\", \"localhost\")\n client = VTClient(loop, test_client, db_host, test_db_name, create_user)\n return client.connect\n", "sub_path": "tests/fixtures/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "virtool.app.app.create_app", "line_number": 25, "usage_type": "call"}, {"api_name": "virtool.app.app", "line_number": 25, "usage_type": "attribute"}, {"api_name": "virtool.app", "line_number": 25, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "attribute"}]} +{"seq_id": "282573572", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom enum import Enum\nimport json\nimport time\nimport requests\nimport urllib.parse\nfrom btcc_log import create_timed_rotating_log\n\nclass Market(Enum):\n All = \"all\"\n BtcCny = \"btccny\"\n LtcCny = \"ltccny\"\n LtcBtc = \"ltcbtc\"\n\n\n\nclass btcc_client (object):\n \"\"\"a wrapper for btcc trade info\"\"\"\n\n def __init__(self, market=Market.BtcCny):\n if (not isinstance(market, Market)):\n raise ValueError(\"market is not defined\")\n\n self._base_url = \"https://data.btcchina.com/data/\"\n self._market = market\n self._logger = create_timed_rotating_log()\n\n @property\n def market(self):\n return self._market\n\n def _request_and_return(self, url, payload={}):\n self._logger.info(\"Web request. 
url:{0}, payload:{1}\".format(url, json.dumps(payload)))\n payload[\"market\"] = self._market.value\n ret = None\n try:\n result = requests.get(url, params=payload)\n if (result.status_code == 200):\n self._logger.info(\"Web request succeeded\")\n ret = result.json()\n except Exception as e:\n self._logger.warn(str(e))\n finally:\n return ret\n\n def get_current_price(self):\n url = urllib.parse.urljoin(self._base_url, \"ticker\")\n return self._request_and_return(url)\n\n def get_last_24hr_trade(self):\n url = urllib.parse.urljoin(self._base_url, \"trades\")\n return self._request_and_return(url)\n\n def get_trade_history_since_time(self, start_time, limit=100):\n if (limit < 0 or limit > 5000):\n raise ValueError(\"limit should between 0 and 5000\")\n if (start_time > int(time.time())):\n raise ValueError(\"since time should be a past datetime\")\n\n payload = {}\n payload[\"since\"] = start_time\n payload[\"limit\"] = limit\n payload[\"sincetype\"] = \"time\"\n url = urllib.parse.urljoin(self._base_url, \"historydata\")\n\n print(start_time)\n return self._request_and_return(url, payload)\n\n def get_trade_history_since_id(self, id, limit=100):\n if (limit < 0 or limit > 5000):\n raise ValueError(\"limit should between 0 and 5000\")\n\n payload = {}\n payload[\"since\"] = id\n payload[\"limit\"] = limit\n print(payload)\n url = urllib.parse.urljoin(self._base_url, \"historydata\")\n return self._request_and_return(url, payload)\n\n def get_current_order(self, limit=None):\n payload = {}\n if (limit != None):\n if (limit < 0):\n raise ValueError(\"limit should be great than 0\")\n else:\n payload[\"limit\"] = limit\n\n url = urllib.parse.urljoin(self._base_url, \"orderbook\")\n return self._request_and_return(url, payload)", "sub_path": "python/btctrade/btcc_client.py", "file_name": "btcc_client.py", "file_ext": "py", "file_size_in_byte": 2833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "enum.Enum", "line_number": 11, "usage_type": "name"}, {"api_name": "btcc_log.create_timed_rotating_log", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 49, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 49, "usage_type": "name"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 53, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 53, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 53, "usage_type": "name"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 66, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 66, "usage_type": "name"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 79, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 79, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 79, "usage_type": "name"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 90, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 90, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 90, "usage_type": "name"}]} 
+{"seq_id": "272400220", "text": "from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom unittest.mock import patch\nfrom household.models import Household, HouseholdUser\nfrom .models import Shop\n\n\n# Test Models\nclass ShopTestCase(TestCase):\n def setUp(self):\n self.mock_time = timezone.now()\n\n # Mock the Django time object\n with patch('django.utils.timezone.now') as mock_now:\n mock_now.return_value = self.mock_time\n\n temp = User.objects.create_user(\n username=\"test\", email='test@…', password='top_secret'\n )\n self.user = HouseholdUser.objects.get(user=temp)\n self.household = Household.objects.create(name=\"household\")\n\n Shop.objects.create(\n owner=self.user,\n household=self.household,\n price=100.00\n )\n\n def test_str(self):\n shop = Shop.objects.get(owner=self.user, household=self.household)\n self.assertEquals(str(shop), \"100.0 - test - {}\".format(self.mock_time))\n\n def test_date(self):\n shop = Shop.objects.get(owner=self.user, household=self.household)\n self.assertEquals(self.mock_time, shop.created)\n\n def test_price(self):\n shop = Shop.objects.get(owner=self.user, household=self.household)\n self.assertEquals(100.00, shop.price)\n\n", "sub_path": "server/shopping/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 12, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 12, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 18, "usage_type": "name"}, {"api_name": "household.models.HouseholdUser.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "household.models.HouseholdUser.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "household.models.HouseholdUser", "line_number": 21, "usage_type": "name"}, {"api_name": "household.models.Household.objects.create", "line_number": 22, "usage_type": "call"}, {"api_name": "household.models.Household.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "household.models.Household", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Shop.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Shop.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Shop", "line_number": 24, "usage_type": "name"}, {"api_name": "models.Shop.objects.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Shop.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Shop", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Shop.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Shop.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Shop", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Shop.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Shop.objects", "line_number": 39, "usage_type": "attribute"}, 
{"api_name": "models.Shop", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "36719382", "text": "import os\r\nimport re\r\nimport requests\r\nimport json\r\nfrom config import LINE_API_PUSH, LINE_API_REPLY\r\n\r\n\r\ndef pushMsg(reply_token, line_aceess_token):\r\n authorization = 'Bearer {}'.format(line_aceess_token)\r\n headers = {\r\n 'Content-Type': 'application/json; charset=UTF-8',\r\n 'Authorization': authorization\r\n }\r\n\r\n data = {\r\n \"replyToken\": reply_token,\r\n \"messages\": [\r\n {\r\n \"type\": \"video\",\r\n \"originalContentUrl\": \"https://mokmoon.com/videos/Brown.mp4\",\r\n \"previewImageUrl\": \"https://linefriends.com/img/bangolufsen/img_og.jpg\"\r\n }\r\n ]\r\n }\r\n\r\n session = requests.Session()\r\n response = session.post(LINE_API_REPLY, data=json.dumps(data), headers=headers)\r\n return 201\r\n", "sub_path": "libs/chatbot_push_helper.py", "file_name": "chatbot_push_helper.py", "file_ext": "py", "file_size_in_byte": 797, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "requests.Session", "line_number": 26, "usage_type": "call"}, {"api_name": "config.LINE_API_REPLY", "line_number": 27, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "42583743", "text": "import opt\nimport sys\nimport config\nimport log\nimport os\nimport daemon\n\n\ndef run():\n parser = opt.create_parser()\n args = parser.parse_args(sys.argv[1:])\n conf = config.Config(args)\n journal = log.Log(path_file=conf.get(\"log\"))\n pid = os.fork()\n if pid == 0:\n daemon.start_daemion(\n config=conf,\n log=journal\n )\n elif pid > 0:\n journal.info(\"Close parrent process.\")\n else:\n journal.emerg(\"NOT STARTED CHILDREN PROCESS!\")\n\n\nif __name__ == '__main__':\n run()\n\n# file='/usr/master.cfg',\n# interval=2,\n# mysql='root:password@localhost/voip.cdr',\n# postgres='postgres:password@localhost/voip.cdr',\n# restart=False,\n# stop=False\n\n# dbname=\"test\", user=\"postgres\", password=\"secret\"\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "opt.create_parser", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 12, "usage_type": "call"}, {"api_name": "log.Log", "line_number": 13, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 14, "usage_type": "call"}, {"api_name": "daemon.start_daemion", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "302490732", "text": "import sys\nfrom PIL import Image\nimport PIL\nimport numpy as np\nimport cv2\n\n\nimport os\n\nprint(\"Chack and Change path always !\")\n\nfn = input(\"Folder name : \")\n\npath, dirs, files = next(os.walk(\"images/\"+fn+\"/data/\"))\nfile_count = len(files)\nprint(file_count)\n\na = int(input(\"First number : \"))\nb = int(input(\"Last number (+1) : \"))\n\ndef createFolder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Error: Creating directory. 
' + directory)\n\nimg_list = []\n\n\n\nfor a in range (a, b):\n \taa = str(a)\n \timg_list.append( Image.open( \"images/\"+fn+\"/data/\" + aa + \".png\" ) )\n # Print info\n \tprint ('File:', aa)\n\nwidth, height = zip(*(i.size for i in img_list))\n\nwidth = 495\nheight = sum(height)\n\n#creates a new empty image, RGB mode\nnew = Image.new( 'RGB',(width, height))\n\nk=0\nfor im in img_list:\n\tnew.paste(im, (0, k))\n\n\tk += im.size[1]\n\n\n'''\nk=0\nfor j in range(1, 200):\n # paste the image at location i,j:\n mosaic.paste( img_list[k], (0, j) )\n \n # Select next image and text\n k = k + 1\n'''\n\n\n\ncreateFolder('images/Analysis_All/'+fn+'/data')\nprint(\" << Create folder /Analysis_All/\"+fn+\"/data/ >> \")\n\n# Save image to file\nnew.save('images/Analysis_All/'+fn+'/data/All.png')\nprint(\"Done\")", "sub_path": "combineImage.py", "file_name": "combineImage.py", "file_ext": "py", "file_size_in_byte": 1309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.walk", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "18960567", "text": "import logging\n\nfrom src.api_rest.model.entity.EntityDayHourForecast import EntityDayHourForecast\nfrom src.api_rest.model.entity.EntityLocationForecast import EntityLocationForecast\nfrom src.api_rest.model.entity.EntityPlan import EntityPlan\nfrom src.api_rest.model.entity.EntityProduct import EntityProduct\nfrom src.api_rest.model.entity.EntityRoute import EntityRoute\nfrom src.api_rest.model.planning_strategies.StochasticVRPMultiDepotStrategy import StochasticVRPMultiDepotStrategy\nfrom src.commons import MongoProductFields, MongoRouteFields, MongoDayHourForecastFields, MongoLocationForecastFields, \\\n MongoPlanFields\n\n\n################################################################################\n# class: ProdutOperation\n################################################################################\n\nclass ProdutOperation:\n INCREMENT = \"increment\"\n DECREMENT = \"decrement\"\n\n### function: toJsonArray ###\n\ndef toJsonArray (entityList):\n try :\n jsonArray = []\n\n for entity in entityList : jsonArray.append (entity.toJson ())\n\n return jsonArray\n\n except Exception as exc :\n logging.error (\"utils: toJsonArray: Error getting a JSON array for the entity '\" + entityList [0].ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n### function: toProducts ###\n\ndef toProducts (mongoCursor) :\n try :\n products = []\n\n for product in mongoCursor :\n products.append (EntityProduct (\n product [MongoProductFields.ID],\n product [MongoProductFields.NAME],\n product [MongoProductFields.QUANTITY]\n ))\n\n return products\n\n except Exception as exc :\n logging.error (\"utils: toProducts: Error parsing MongoProducts from mongo to \" + EntityProduct.ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n### function: toRoutes ###\n\ndef toRoutes (mongoCursor) :\n try:\n routes = []\n\n for route in mongoCursor :\n strategy = None\n\n 
if route [MongoRouteFields.STRATEGY] == StochasticVRPMultiDepotStrategy.STRATEGY_NAME :\n strategy = StochasticVRPMultiDepotStrategy ()\n\n routes.append (EntityRoute (\n route [MongoRouteFields.ORIGIN],\n route [MongoRouteFields.DESTINY],\n route [MongoRouteFields.DEPARTURE],\n route [MongoRouteFields.ARRIVAL],\n toProducts (route [MongoRouteFields.PRODUCTS]),\n strategy,\n route [MongoRouteFields.ID],\n route [MongoRouteFields.STATE]\n ))\n\n return routes\n\n except Exception as exc :\n logging.error (\"utils: toRoutes: Error parsing MongoRoutes from mongo to \" + EntityRoute.ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n### function: toDayHourForecasts ###\n\ndef toDayHourForecasts (mongoCursor) :\n try :\n dayHourForecasts = []\n\n for dayHourForecast in mongoCursor :\n dayHourForecasts.append (EntityDayHourForecast (\n dayHourForecast [MongoDayHourForecastFields.DATE],\n dayHourForecast [MongoDayHourForecastFields.HOUR],\n dayHourForecast [MongoDayHourForecastFields.WEATHER],\n dayHourForecast [MongoDayHourForecastFields.WEATHER_DES],\n dayHourForecast [MongoDayHourForecastFields.TMP],\n dayHourForecast [MongoDayHourForecastFields.TMP_MIN],\n dayHourForecast [MongoDayHourForecastFields.TMP_MAX],\n dayHourForecast [MongoDayHourForecastFields.PRESSURE],\n dayHourForecast [MongoDayHourForecastFields.HUMIDITY],\n dayHourForecast [MongoDayHourForecastFields.WIND_SPEED]\n ))\n\n return dayHourForecasts\n\n except Exception as exc :\n logging.error (\"utils: toDayHourForecast: Error parsing MongoDayHourForecasts from mongo to \" + EntityDayHourForecast.ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n### function: toLocationForecasts ###\n\ndef toLocationForecasts (mongoCursor) :\n try :\n locationForecasts = []\n\n for locationForecast in mongoCursor :\n locationForecasts.append (EntityLocationForecast (\n locationForecast [MongoLocationForecastFields.LATITUDE],\n locationForecast [MongoLocationForecastFields.LONGITUDE],\n locationForecast [MongoLocationForecastFields.COUNTRY],\n locationForecast [MongoLocationForecastFields.CITY],\n locationForecast [MongoLocationForecastFields.TIMEZONE],\n toDayHourForecasts (locationForecast [MongoLocationForecastFields.DAY_HOUR_FORECASTS])\n ))\n\n return locationForecasts\n\n except Exception as exc :\n logging.error (\"utils: toLocationForecasts: Error parsing MongoLocationForecast from mongo to \" + EntityLocationForecast.ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")\n\n\n### function: toPlans ###\n\ndef toPlans (mongoCursor) :\n try :\n plans = []\n\n for plan in mongoCursor :\n plans.append (EntityPlan (\n toRoutes ([plan [MongoPlanFields.ROUTE]]) [0],\n plan [MongoPlanFields.PLAN],\n toLocationForecasts (plan [MongoPlanFields.LOCATION_FORECASTS]),\n plan [MongoPlanFields.ID],\n plan [MongoPlanFields.DATE_CREATION],\n plan [MongoPlanFields.HOUR_CREATION]\n ))\n\n return plans\n\n except Exception as exc :\n logging.error (\"utils: toPlans: Error parsing MongoPlan from mongo to \" + EntityPlan.ENTITY_NAME)\n logging.error (\"[Exception: \" + str (exc) + \"]\")", "sub_path": "src/api_rest/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "logging.error", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 33, "usage_type": "call"}, {"api_name": 
"src.api_rest.model.entity.EntityProduct.EntityProduct", "line_number": 43, "usage_type": "call"}, {"api_name": "src.commons.MongoProductFields.ID", "line_number": 44, "usage_type": "attribute"}, {"api_name": "src.commons.MongoProductFields", "line_number": 44, "usage_type": "name"}, {"api_name": "src.commons.MongoProductFields.NAME", "line_number": 45, "usage_type": "attribute"}, {"api_name": "src.commons.MongoProductFields", "line_number": 45, "usage_type": "name"}, {"api_name": "src.commons.MongoProductFields.QUANTITY", "line_number": 46, "usage_type": "attribute"}, {"api_name": "src.commons.MongoProductFields", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 52, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityProduct.EntityProduct.ENTITY_NAME", "line_number": 52, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.entity.EntityProduct.EntityProduct", "line_number": 52, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 53, "usage_type": "call"}, {"api_name": "src.commons.MongoRouteFields.STRATEGY", "line_number": 65, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 65, "usage_type": "name"}, {"api_name": "src.api_rest.model.planning_strategies.StochasticVRPMultiDepotStrategy.StochasticVRPMultiDepotStrategy.STRATEGY_NAME", "line_number": 65, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.planning_strategies.StochasticVRPMultiDepotStrategy.StochasticVRPMultiDepotStrategy", "line_number": 65, "usage_type": "name"}, {"api_name": "src.api_rest.model.planning_strategies.StochasticVRPMultiDepotStrategy.StochasticVRPMultiDepotStrategy", "line_number": 66, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityRoute.EntityRoute", "line_number": 68, "usage_type": "call"}, {"api_name": "src.commons.MongoRouteFields.ORIGIN", "line_number": 69, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 69, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.DESTINY", "line_number": 70, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 70, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.DEPARTURE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 71, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.ARRIVAL", "line_number": 72, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 72, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.PRODUCTS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 73, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.ID", "line_number": 75, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 75, "usage_type": "name"}, {"api_name": "src.commons.MongoRouteFields.STATE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "src.commons.MongoRouteFields", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 82, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityRoute.EntityRoute.ENTITY_NAME", "line_number": 82, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.entity.EntityRoute.EntityRoute", "line_number": 82, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 83, "usage_type": "call"}, {"api_name": 
"src.api_rest.model.entity.EntityDayHourForecast.EntityDayHourForecast", "line_number": 93, "usage_type": "call"}, {"api_name": "src.commons.MongoDayHourForecastFields.DATE", "line_number": 94, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 94, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.HOUR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 95, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.WEATHER", "line_number": 96, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 96, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.WEATHER_DES", "line_number": 97, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 97, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.TMP", "line_number": 98, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 98, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.TMP_MIN", "line_number": 99, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 99, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.TMP_MAX", "line_number": 100, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 100, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.PRESSURE", "line_number": 101, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 101, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.HUMIDITY", "line_number": 102, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 102, "usage_type": "name"}, {"api_name": "src.commons.MongoDayHourForecastFields.WIND_SPEED", "line_number": 103, "usage_type": "attribute"}, {"api_name": "src.commons.MongoDayHourForecastFields", "line_number": 103, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 109, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityDayHourForecast.EntityDayHourForecast.ENTITY_NAME", "line_number": 109, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.entity.EntityDayHourForecast.EntityDayHourForecast", "line_number": 109, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 110, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityLocationForecast.EntityLocationForecast", "line_number": 120, "usage_type": "call"}, {"api_name": "src.commons.MongoLocationForecastFields.LATITUDE", "line_number": 121, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", "line_number": 121, "usage_type": "name"}, {"api_name": "src.commons.MongoLocationForecastFields.LONGITUDE", "line_number": 122, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", "line_number": 122, "usage_type": "name"}, {"api_name": "src.commons.MongoLocationForecastFields.COUNTRY", "line_number": 123, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", "line_number": 123, "usage_type": "name"}, {"api_name": "src.commons.MongoLocationForecastFields.CITY", "line_number": 124, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", 
"line_number": 124, "usage_type": "name"}, {"api_name": "src.commons.MongoLocationForecastFields.TIMEZONE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", "line_number": 125, "usage_type": "name"}, {"api_name": "src.commons.MongoLocationForecastFields.DAY_HOUR_FORECASTS", "line_number": 126, "usage_type": "attribute"}, {"api_name": "src.commons.MongoLocationForecastFields", "line_number": 126, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 132, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityLocationForecast.EntityLocationForecast.ENTITY_NAME", "line_number": 132, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.entity.EntityLocationForecast.EntityLocationForecast", "line_number": 132, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 133, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityPlan.EntityPlan", "line_number": 143, "usage_type": "call"}, {"api_name": "src.commons.MongoPlanFields.ROUTE", "line_number": 144, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 144, "usage_type": "name"}, {"api_name": "src.commons.MongoPlanFields.PLAN", "line_number": 145, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 145, "usage_type": "name"}, {"api_name": "src.commons.MongoPlanFields.LOCATION_FORECASTS", "line_number": 146, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 146, "usage_type": "name"}, {"api_name": "src.commons.MongoPlanFields.ID", "line_number": 147, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 147, "usage_type": "name"}, {"api_name": "src.commons.MongoPlanFields.DATE_CREATION", "line_number": 148, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 148, "usage_type": "name"}, {"api_name": "src.commons.MongoPlanFields.HOUR_CREATION", "line_number": 149, "usage_type": "attribute"}, {"api_name": "src.commons.MongoPlanFields", "line_number": 149, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 155, "usage_type": "call"}, {"api_name": "src.api_rest.model.entity.EntityPlan.EntityPlan.ENTITY_NAME", "line_number": 155, "usage_type": "attribute"}, {"api_name": "src.api_rest.model.entity.EntityPlan.EntityPlan", "line_number": 155, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "158132233", "text": "from pymongo import MongoClient\n\nclass GuruMatchDatabase(object):\n URI = \"mongodb+srv://guruMatch:orion123@gurumatch-db.j176d.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\"\n DATABASE = None\n\n @staticmethod\n def initialize():\n print(\"initializing the MongoDB\")\n client = MongoClient(GuruMatchDatabase.URI)\n GuruMatchDatabase.DATABASE = client.get_database(\"guruMatchDatabase\")\n print(GuruMatchDatabase.DATABASE[\"user-db\"].count_documents({}))\n \n @staticmethod\n def idExist(userID):\n if GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userID}) is None:\n return False\n return True\n\n @staticmethod\n def isUsernameExist(userId):\n # true = 1, false = 2 [because of protobuf consider 0 as default]\n print(GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userId}, {\"profile.username\": 1,\"_id\": 0}))\n userNameExist = GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userId}, {\"profile.username\": 1, \"_id\": 0})\n if 
(userNameExist is None) or (len(userNameExist[\"profile\"])) == 0:\n            return 2\n        return 1\n\n    @staticmethod\n    def insertNewUser(userId, name):\n        print(\"inserting new User data\")\n        GuruMatchDatabase.DATABASE['user-db'].insert_one({\"_id\": userId, \"profile\":{\"name\": name}, \"mentee\": {}, \"mentor\":{}})\n\n    @staticmethod\n    def insertMenteeSelectedMentor(mentorID, menteeID):\n        \"\"\"\n        when a mentee selects a mentor, we store the menteeID in the mentor's matching document, keyed by mentorID\n        \"\"\"\n        print(\"inserting the mentor that the mentee wants to match\")\n        GuruMatchDatabase.DATABASE['matchingDatabase'].update_one(\n            {\"_id\": mentorID},\n            {\n             \"$push\":{\"mentee\": menteeID}\n            },\n            upsert = True)\n    \n    @staticmethod\n    def insertMentorSelectedMentee(menteeID, mentorID):\n        print(\"inserting the mentee that the mentor wants to teach\")\n        GuruMatchDatabase.DATABASE['matchingDatabase'].update_one(\n            {\"_id\": menteeID},\n            {\n             \"$push\":{\"mentor\": mentorID}\n            },\n            upsert = True)\n    \n    @staticmethod\n    def getAllMatchRequest(userID):\n        print(\"Getting all the match requests\")\n        res = GuruMatchDatabase.DATABASE['matchingDatabase'].find_one({\"_id\" : userID}, {\"_id\": 0})\n        print(res)\n        return res\n\n\n    @staticmethod\n    def insertUserForm(userId, userFormData):\n        print(\"inserting User Form data\")\n        # we don't need to validate userId here: the caller is authenticated before this function\n        # is used, and the userId is supplied by login, which extracts it from the database, so it is always valid\n        GuruMatchDatabase.DATABASE[\"user-db\"].update_one(\n            {\"_id\":userId}, \n            {\n                \"$set\":userFormData\n            })\n    \n    @staticmethod\n    def getUserProfile(userId):\n        print(\"Getting user Profile\")\n        return (GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userId},{\"profile\":1, \"_id\":0})[\"profile\"])\n\n    # can be used for both creating and updating mentor/mentee data, even though it says create\n    @staticmethod\n    def createMenteeAndMentor(userId, userInput):\n        print(\"inserting the mentee or mentor input\")\n        print(userInput)\n        GuruMatchDatabase.DATABASE[\"user-db\"].update_one(\n            {\"_id\": userId},\n            {\n                \"$set\": userInput\n            }\n        )\n    \n    @staticmethod\n    def getMatchMentors(userId):\n        \"\"\"\n        returns all the mentors that match the mentee's interests\n        \"\"\"\n        menteeProfile = GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userId})\n        # Check whether the user has set up their mentee interests\n        userMenteeSetUp = True\n        if (len(menteeProfile[\"mentee\"]) == 0 or menteeProfile[\"mentee\"][\"interest\"] == None):\n            userMenteeSetUp = False\n        # find() returns a cursor over the documents in our collection\n        alluser = GuruMatchDatabase.DATABASE[\"user-db\"].find()\n        currentNumberOfUser = 0\n        totalUserReturn = 10\n        listOfReturnUser = list()\n        # we will only return 10 users\n        if (not userMenteeSetUp):\n            try:\n                while( currentNumberOfUser < totalUserReturn):\n                    currentCursor = alluser.next()\n                    if (currentCursor[\"_id\"] == userId or (len(currentCursor[\"mentor\"]) == 0 or currentCursor[\"mentor\"][\"interest\"] is None)):\n                        continue\n                    listOfReturnUser.append(currentCursor)\n                    currentNumberOfUser += 1\n            except StopIteration:\n                pass\n        else:\n            menteeInterest = menteeProfile[\"mentee\"][\"interest\"]\n            try:\n                while(currentNumberOfUser < totalUserReturn):\n                    currentUserCursor = alluser.next()\n                    if (currentUserCursor[\"_id\"] != userId and len(currentUserCursor[\"mentor\"]) > 0 and currentUserCursor[\"mentor\"][\"interest\"] is not None):\n                        if (any(item in 
currentUserCursor[\"mentor\"][\"interest\"] for item in menteeInterest)):\n                            listOfReturnUser.append(currentUserCursor)\n                            currentNumberOfUser += 1\n                    else:\n                        continue\n            except StopIteration:\n                if (len(listOfReturnUser) < totalUserReturn):\n                    try:\n                        alluser = GuruMatchDatabase.DATABASE[\"user-db\"].find()\n                        while( currentNumberOfUser < totalUserReturn):\n                            # TODO make sure to add new users at the end if fewer mentors matched;\n                            # have to start from the beginning with find()\n                            currentUserCursor = alluser.next()\n                            if (currentUserCursor[\"_id\"] != userId and len(currentUserCursor[\"mentor\"]) > 0 and currentUserCursor[\"mentor\"][\"interest\"] is not None):\n                                listOfReturnUser.append(currentUserCursor)\n                                currentNumberOfUser += 1\n                    except StopIteration:\n                        pass\n        return listOfReturnUser\n\n    @staticmethod\n    def getMatchMentees(userId):\n        \"\"\"\n        we could have reused the method above since both static methods are the same, but that would\n        greatly reduce readability and simplicity, so I decided to write a separate method that\n        mirrors the one above.\n\n        It returns all the mentees which match with the mentor\n        \"\"\"\n        mentorProfile = GuruMatchDatabase.DATABASE[\"user-db\"].find_one({\"_id\": userId})\n        # Check whether the user has set up their mentor interests; if not,\n        # return 10 arbitrary users\n        userMentorSetUp = True\n        if (len(mentorProfile[\"mentor\"]) == 0 or mentorProfile[\"mentor\"][\"interest\"] == None):\n            userMentorSetUp = False\n        # find() returns a cursor over the documents in our collection\n        alluser = GuruMatchDatabase.DATABASE[\"user-db\"].find()\n        currentNumberOfUser = 0\n        totalUserReturn = 10\n        listOfReturnUser = list()\n        # we will only return 10 users\n        if (not userMentorSetUp):\n            try:\n                while( currentNumberOfUser < totalUserReturn):\n                    currentCursor = alluser.next()\n                    if (currentCursor[\"_id\"] == userId or len(currentCursor[\"mentee\"]) == 0 or currentCursor[\"mentee\"][\"interest\"] is None):\n                        continue\n                    listOfReturnUser.append(currentCursor)\n                    currentNumberOfUser += 1\n            except StopIteration:\n                print(\"Stop Iteration\")\n        else:\n            mentorInterest = mentorProfile[\"mentor\"][\"interest\"]\n            try:\n                while(currentNumberOfUser < totalUserReturn):\n                    currentUserCursor = alluser.next()\n                    if (currentUserCursor[\"_id\"] != userId and len(currentUserCursor[\"mentee\"]) > 0 and currentUserCursor[\"mentee\"][\"interest\"] is not None):\n                        if (any(item in currentUserCursor[\"mentee\"][\"interest\"] for item in mentorInterest)):\n                            listOfReturnUser.append(currentUserCursor)\n                            currentNumberOfUser += 1\n                        else:\n                            continue\n            except StopIteration:\n                if (len(listOfReturnUser) < totalUserReturn):\n                    try:\n                        alluser = GuruMatchDatabase.DATABASE[\"user-db\"].find()\n                        while( currentNumberOfUser < totalUserReturn):\n                            # TODO make sure to add new users at the end if fewer mentees matched;\n                            # have to start from the beginning with find()\n                            currentUserCursor = alluser.next()\n                            if (currentUserCursor[\"_id\"] != userId and len(currentUserCursor[\"mentee\"]) > 0 and currentUserCursor[\"mentee\"][\"interest\"] is not None):\n                                listOfReturnUser.append(currentUserCursor)\n                                currentNumberOfUser += 1\n                    except StopIteration:\n                        pass\n        return listOfReturnUser\n\n# Just for Testing\n#GuruMatchDatabase.initialize()\n#print(GuruMatchDatabase.isUsernameExist(\"633d25d5\"))\n#GuruMatchDatabase.insertNewUser({\"_id\": \"1345234ee\", \"name\": \"David Beckham\"})\n#GuruMatchDatabase.insertUserForm(\"1345234ee\", {\"profile.username\": \"Kicker\", \"profile.userBio\": \"I am soccer 
player\"})\n#print(GuruMatchDatabase.getUserProfile(\"608888188def3a0ceded6f12\"))\n#608895309e12f61b99d44169\n#\")))\n#print(\"final : \", GuruMatchDatabase.getMatchMentors(\"60944967595c0ef62c396663\"))\n#GuruMatchDatabase.insertMentorMatch(\"60944967595c0ef62c396663\", \"6095b0890d71363c20c3ddbe\")\n#print(GuruMatchDatabase.getMatchMentors(\"609c6fb9984f29493e5419ea\"))\n\n \n\n\n\n\n\n\n", "sub_path": "application_server/server/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 10018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pymongo.MongoClient", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "156370502", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 24 13:40:25 2020\n\n@author: qtckp\n\"\"\"\n\n\nimport os, shutil\nimport json\nimport googletrans\n\n\n\ndirectory='./text_logger'\nfile_name='text_logger3.py'\n\nif os.path.exists(directory):\n shutil.rmtree(directory)\n\nos.makedirs(directory)\n\nshutil.copyfile(file_name,os.path.join(directory,file_name))\n\n\n\nlangs = {value: key for key, value in googletrans.LANGUAGES.items()}\n\nwith open(os.path.join(directory,\"languges.json\"), \"w\") as write_file:\n json.dump(langs, write_file, indent=4)\n\n", "sub_path": "SecondTry/create_data.py", "file_name": "create_data.py", "file_ext": "py", "file_size_in_byte": 526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 19, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "googletrans.LANGUAGES.items", "line_number": 27, "usage_type": "call"}, {"api_name": "googletrans.LANGUAGES", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "396355103", "text": "import boto3\nimport os\nimport json\n\n\ndef upload_to_s3(syllabus, dept):\n \"\"\"\n Upload the syllabus info of the department to s3\n :param syllabus: iterator of course info\n :param dept: abbr of the department. e.g. 
\"PSE\"\n :return: dict :=\n {\n 'Expiration': 'string',\n 'ETag': 'string',\n 'ServerSideEncryption': 'AES256'|'aws:kms',\n 'VersionId': 'string',\n 'SSECustomerAlgorithm': 'string',\n 'SSECustomerKeyMD5': 'string',\n 'SSEKMSKeyId': 'string',\n 'SSEKMSEncryptionContext': 'string',\n 'RequestCharged': 'requester'\n }\n \"\"\"\n s3 = boto3.resource('s3', region_name=\"ap-northeast-1\")\n syllabus_object = s3.Object(os.getenv('BUCKET_NAME'), os.getenv('OBJECT_PATH') + dept + '.json')\n resp = syllabus_object.put(\n ACL='public-read',\n Body=bytes(json.dumps(syllabus).encode('UTF-8')),\n ContentType='application/json',\n CacheControl='max-age=86400, must-revalidate'\n )\n return resp\n", "sub_path": "scraper/s3util.py", "file_name": "s3util.py", "file_ext": "py", "file_size_in_byte": 1046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "boto3.resource", "line_number": 24, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "274362505", "text": "import os\r\nfrom posixpath import relpath\r\nfrom dropbox.files import WriteMode \r\nimport dropbox\r\n\r\nclass TransferData :\r\n def __init__(self,access_token) :\r\n self.access_token = access_token\r\n\r\n def upload(self,forrm, too) :\r\n dbx = dropbox.Dropbox(self.access_token)\r\n for root,dir,files in os.walk(forrm) :\r\n for i in files :\r\n localpath = os.path.join(root,i)\r\n rlpath = os.path.relpath(localpath,forrm)\r\n dbpath = os.path.join(too,rlpath) \r\n with open(localpath,\"rb\")as f :\r\n dbx.files_upload(f.read(),dbpath,mode = WriteMode('overwrite'))\r\n\r\ndef main() :\r\n access_token = \"AM9FI4pN-MoAAAAAAAAAARONzJAz1fDZ1kz1MTbClpItVrYuO6296MfZzINTSblB\"\r\n td = TransferData(access_token)\r\n filefrom = input(\"Enter file source :\")\r\n filetoo = input(\"Enter DropBox location :\")\r\n td.upload(filefrom,filetoo)\r\n print(\"File Moved\")\r\n\r\nmain()", "sub_path": "uploadstorage.py", "file_name": "uploadstorage.py", "file_ext": "py", "file_size_in_byte": 968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "dropbox.Dropbox", "line_number": 11, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "dropbox.files.WriteMode", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "410123245", "text": "from django.core.exceptions import ImproperlyConfigured\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nimport random\n\nfrom django_filters import FilterSet, CharFilter, NumberFilter\n# Create your views here.\n\nfrom .forms import VariationInventoryFormSet, PackageFilterForm\nfrom .mixins import StaffRequiredMixin\nfrom .models import Package, 
Variation, Category\n\n\nclass CategoryListView(ListView):\n model = Category\n queryset = Category.objects.all()\n template_name = \"packages/package_list.html\"\n\n\nclass CategoryDetailView(DetailView):\n model = Category\n\n def get_context_data(self, *args, **kwargs):\n context = super(CategoryDetailView, self).get_context_data(*args, **kwargs)\n obj = self.get_object()\n package_set = obj.product_set.all()\n default_package = obj.default_category.all()\n packages = (package_set | default_package).distinct()\n context[\"packages\"] = packages\n return context\n\n\nclass VariationListView(StaffRequiredMixin, ListView):\n model = Variation\n queryset = Variation.objects.all()\n\n def get_context_data(self, *args, **kwargs):\n context = super(VariationListView, self).get_context_data(*args, **kwargs)\n context[\"formset\"] = VariationInventoryFormSet(queryset=self.get_queryset())\n return context\n\n def get_queryset(self, *args, **kwargs):\n package_pk = self.kwargs.get(\"pk\")\n if package_pk:\n package = get_object_or_404(Package, pk=package_pk)\n queryset = Variation.objects.filter(product=package)\n return queryset\n\n def post(self, request, *args, **kwargs):\n formset = VariationInventoryFormSet(request.POST, request.FILES)\n if formset.is_valid():\n formset.save(commit=False)\n for form in formset:\n new_item = form.save(commit=False)\n # if new_item.title:\n package_pk = self.kwargs.get(\"pk\")\n package = get_object_or_404(Package, pk=package_pk)\n new_item.package = package\n new_item.save()\n\n messages.success(request, \"Your inventory and pricing has been updated.\")\n return redirect(\"packages\")\n raise Http404\n\n\nclass PackageFilter(FilterSet):\n title = CharFilter(name='title', lookup_type='icontains', distinct=True)\n category = CharFilter(name='categories__title', lookup_type='icontains', distinct=True)\n category_id = CharFilter(name='categories__id', lookup_type='icontains', distinct=True)\n min_price = NumberFilter(name='variation__price', lookup_type='gte', distinct=True) # (some_price__gte=somequery)\n max_price = NumberFilter(name='variation__price', lookup_type='lte', distinct=True)\n\n class Meta:\n model = Package\n fields = [\n 'min_price',\n 'max_price',\n 'category',\n 'title',\n 'description',\n ]\n\n\ndef package_list(request):\n qs = Package.objects.all()\n ordering = request.GET.get(\"ordering\")\n if ordering:\n qs = Package.objects.all().order_by(ordering)\n f = PackageFilter(request.GET, queryset=qs)\n return render(request, \"packages/package_list.html\", {\"object_list\": f})\n\n\nclass FilterMixin(object):\n filter_class = None\n search_ordering_param = \"ordering\"\n\n def get_queryset(self, *args, **kwargs):\n try:\n qs = super(FilterMixin, self).get_queryset(*args, **kwargs)\n return qs\n except:\n raise ImproperlyConfigured(\"You must have a queryset in order to use the FilterMixin\")\n\n def get_context_data(self, *args, **kwargs):\n context = super(FilterMixin, self).get_context_data(*args, **kwargs)\n qs = self.get_queryset()\n ordering = self.request.GET.get(self.search_ordering_param)\n if ordering:\n qs = qs.order_by(ordering)\n filter_class = self.filter_class\n if filter_class:\n f = filter_class(self.request.GET, queryset=qs)\n context[\"object_list\"] = f\n return context\n\n\nclass PackageListView(FilterMixin, ListView):\n model = Package\n queryset = Package.objects.all()\n filter_class = PackageFilter\n\n def get_context_data(self, *args, **kwargs):\n context = super(PackageListView, self).get_context_data(*args, **kwargs)\n 
context[\"now\"] = timezone.now()\n context[\"query\"] = self.request.GET.get(\"q\") # None\n context[\"filter_form\"] = PackageFilterForm(data=self.request.GET or None)\n return context\n\n def get_queryset(self, *args, **kwargs):\n qs = super(PackageListView, self).get_queryset(*args, **kwargs)\n query = self.request.GET.get(\"q\")\n if query:\n qs = self.model.objects.filter(\n Q(title__icontains=query) |\n Q(description__icontains=query)\n )\n try:\n qs2 = self.model.objects.filter(\n Q(price=query)\n )\n qs = (qs | qs2).distinct()\n except:\n pass\n return qs\n\n\nclass PackageDetailView(DetailView):\n model = Package\n\n # template_name = \"product.html\"\n # template_name = \"/_detail.html\"\n def get_context_data(self, *args, **kwargs):\n context = super(PackageDetailView, self).get_context_data(*args, **kwargs)\n instance = self.get_object()\n # order_by(\"-title\")\n context[\"related\"] = sorted(Package.objects.get_related(instance)[:6], key=lambda x: random.random())\n return context\n\n\ndef package_detail_view_func(request, id):\n # package_instance = Product.objects.get(id=id)\n package_instance = get_object_or_404(Package, id=id)\n try:\n package_instance = Package.objects.get(id=id)\n except Package.DoesNotExist:\n raise Http404\n except:\n raise Http404\n\n template = \"packages/package_detail.html\"\n context = {\n \"object\": package_instance\n }\n return render(request, template, context)\n", "sub_path": "packages/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "django.views.generic.list.ListView", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Category", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 21, "usage_type": "name"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 25, "usage_type": "name"}, {"api_name": "models.Category", "line_number": 26, "usage_type": "name"}, {"api_name": "mixins.StaffRequiredMixin", "line_number": 38, "usage_type": "name"}, {"api_name": "django.views.generic.list.ListView", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Variation", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Variation.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Variation.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Variation", "line_number": 40, "usage_type": "name"}, {"api_name": "forms.VariationInventoryFormSet", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Package", "line_number": 50, "usage_type": "argument"}, {"api_name": "models.Variation.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Variation.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Variation", "line_number": 51, "usage_type": "name"}, {"api_name": "forms.VariationInventoryFormSet", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Package", "line_number": 62, "usage_type": "argument"}, {"api_name": 
"django.contrib.messages.success", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 68, "usage_type": "name"}, {"api_name": "django_filters.FilterSet", "line_number": 71, "usage_type": "name"}, {"api_name": "django_filters.CharFilter", "line_number": 72, "usage_type": "call"}, {"api_name": "django_filters.CharFilter", "line_number": 73, "usage_type": "call"}, {"api_name": "django_filters.CharFilter", "line_number": 74, "usage_type": "call"}, {"api_name": "django_filters.NumberFilter", "line_number": 75, "usage_type": "call"}, {"api_name": "django_filters.NumberFilter", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Package", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Package.objects.all", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Package.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Package.objects.all", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Package.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 107, "usage_type": "call"}, {"api_name": "django.views.generic.list.ListView", "line_number": 122, "usage_type": "name"}, {"api_name": "models.Package", "line_number": 123, "usage_type": "name"}, {"api_name": "models.Package.objects.all", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Package.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 124, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 129, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 129, "usage_type": "name"}, {"api_name": "forms.PackageFilterForm", "line_number": 131, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 139, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 144, "usage_type": "call"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 152, "usage_type": "name"}, {"api_name": "models.Package", "line_number": 153, "usage_type": "name"}, {"api_name": "models.Package.objects.get_related", "line_number": 161, "usage_type": "call"}, {"api_name": "models.Package.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 161, "usage_type": "name"}, {"api_name": "random.random", "line_number": 161, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Package", "line_number": 167, "usage_type": "argument"}, {"api_name": "models.Package.objects.get", "line_number": 169, "usage_type": "call"}, {"api_name": "models.Package.objects", "line_number": 169, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 169, "usage_type": "name"}, {"api_name": "models.Package.DoesNotExist", "line_number": 170, "usage_type": "attribute"}, {"api_name": "models.Package", "line_number": 170, "usage_type": 
"name"}, {"api_name": "django.http.Http404", "line_number": 171, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 173, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "518896849", "text": "import pygame\nimport random\n\nimport config\nfrom tile import Tile\nfrom score import Score\n\n\n\nsize = config.size\nscale = config.scale\ncolor = config.color\n\npygame.init()\n\nscreen = pygame.display.set_mode([size[\"x\"], size[\"y\"]])\nclock = pygame.time.Clock()\n\ncount = 30\n\nsocre = 0\n\n\ns = Score()\n\nr = random.randint(0,3)\ntest_tile = Tile(r)\n\nrunning = True\n\ndef check_hit(l):\n if(test_tile.get_hit(l)):\n s.add(100)\n else:\n s.add(-100)\n\ndef show_score():\n screen.blit(s.show(),(50,50))\n\ndef draw_background():\n for i in range(1, 8):\n pygame.draw.line(screen, color[\"line\"],\n (i * scale[\"v_line\"], 0), (i * scale[\"v_line\"], size[\"y\"]), scale[\"v_line_size\"])\n pygame.draw.line(screen, color[\"line\"],\n (0, scale[\"h_line\"]), (size[\"x\"], scale[\"h_line\"]), scale[\"h_line_size\"])\n\ndef check_event(e):\n for event in e:\n if event.type == pygame.QUIT:\n pygame.quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n elif event.key == pygame.K_d:\n check_hit(0)\n elif event.key == pygame.K_f:\n check_hit(1)\n elif event.key == pygame.K_j:\n check_hit(2)\n elif event.key == pygame.K_k:\n check_hit(3)\n \n\nwhile running:\n check_event(pygame.event.get())\n screen.fill((255, 255, 255))\n draw_background()\n\n show_score()\n \n test_tile.update(5)\n test_tile.draw(screen)\n\n pygame.display.flip()\n pygame.display.update()\n\n clock.tick(60)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "config.size", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.scale", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.color", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 17, "usage_type": "attribute"}, {"api_name": "score.Score", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "tile.Tile", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.K_d", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.K_f", "line_number": 56, 
"usage_type": "attribute"}, {"api_name": "pygame.K_j", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.K_k", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "487678601", "text": "\n\nimport argparse\nimport sys\nfrom Bio import SeqIO\n\nparser = argparse.ArgumentParser(description=\"extract reads from genome reads from kraken results\")\n\nparser.add_argument(\"--kraken_input\", help=\"kraken output\", type=argparse.FileType('r'))\nparser.add_argument(\"--fasta_file\", help=\"fasta file to extract sequencegres\", type=str)\nparser.add_argument('--taxonomy_file', help='jellyfish files for each strain', type=argparse.FileType('r'))\nparser.add_argument('--parent_taxid', help='parent taxid to extract', type=int)\n\nargs = parser.parse_args()\nkraken_input = args.kraken_input\nfasta_file = args.fasta_file\ntaxonomy_file = args.taxonomy_file\nparent_taxid = args.parent_taxid\n\n# base = \"/home/ksimmon/data/strain_typing/MiSeq_run_July_2015/renamed/148-6_3-Acinetobacter-gp3_processed_jellyfish_31/\"\n# kraken_input = base + \"148-6_3-Acinetobacter-gp3_kraken.krk\"\n# fasta_file = base + \"148-6_3-Acinetobacter-gp3_preprocessed_all.fasta\"\n# taxonomy_file = \"/home/ksimmon/reference/strian_typing_resources/kraken_acineto_db/taxonomy/nodes.dmp\"\n# parent_taxid = 2\n\n\ntax_tree = {}\n\nfor line in taxonomy_file:\n line = line.strip().split(\"\\t\")\n child, parent = int(line[0]), int(line[2])\n tax_tree.update({child:parent})\n\nreads_to_extract = set([])\nfor line in kraken_input:\n line = line.strip().split(\"\\t\")\n is_classified, read_name, taxid = line[0] == \"C\", line[1], int(line[2])\n if is_classified:\n while taxid != 1:\n if taxid == parent_taxid:\n reads_to_extract.add(read_name)\n break\n\n taxid = tax_tree[taxid]\n\nfor s in SeqIO.parse(fasta_file, \"fasta\"):\n if s.name in reads_to_extract:\n sys.stdout.write(\">{0}\\n{1}\\n\".format(s.name,s.seq,))\n", "sub_path": "straintypemer/utility_scripts/extract_reads.py", "file_name": "extract_reads.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 9, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 11, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 46, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 46, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "179110518", "text": "import sqlite3\nfrom contextlib import closing\n\nif __name__ == '__main__':\n with closing(sqlite3.connect(\"customers.db\")) as connection:\n with closing(connection.cursor()) as cursor:\n # SQL piece of code Executed\n try:\n cursor.execute(\"DROP TABLE customers\")\n except sqlite3.OperationalError:\n print(\"Already exists the table\")\n\n 
cursor.executescript(\"\"\"\n CREATE TABLE customers(name VARCHAR(255), address VARCHAR(255));\n \"\"\")\n\n List = [('Dung', 'Binh Dinh'),\n ('Tuan', 'Phu Yen'),\n ('Royal', 'Vinh Phuc')]\n\n connection.executemany(\"\"\"INSERT INTO customers(name, address)\n VALUES (?, ?)\"\"\", List)\n\n sql = \"\"\"SELECT * FROM customers WHERE name = \"Hoi\";\"\"\"\n cursor.execute(sql)\n\n result = cursor.fetchall()\n for x in result:\n print(x)\n\n # Changes saved into database\n connection.commit()\n", "sub_path": "quiz2.py", "file_name": "quiz2.py", "file_ext": "py", "file_size_in_byte": 1063, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "contextlib.closing", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "488295511", "text": "# -*- coding: utf-8 -*-\nfrom django import dispatch\nfrom django.contrib.sites.models import Site\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework.authtoken.models import Token\n\nfrom threebot.models import Workflow\nfrom threebot.models import Worker\nfrom threebot.models import ParameterList\n\n\n@python_2_unicode_compatible\nclass Hook(models.Model):\n slug = models.SlugField(max_length=255)\n user = models.CharField(max_length=255, blank=True, null=True)\n repo = models.CharField(\n max_length=255, blank=True, null=True,\n help_text=u'Leave blank. Field is not used in the current version.')\n secret = models.CharField(\n max_length=255, blank=True, null=True,\n help_text=u'Leave blank. 
Field is not used in the current version.')\n workflow = models.ForeignKey(Workflow)\n worker = models.ForeignKey(Worker)\n param_list = models.ForeignKey(ParameterList)\n\n def get_hook_url(self):\n return \"%d-%d-%d-%s\" % (self.workflow.id, self.worker.id, self.param_list.id, self.slug)\n\n def __str__(self):\n return \"%s (%d)\" % (self.get_hook_url(), self.pk)\n\n def make_full_url(self, user):\n token, created = Token.objects.get_or_create(user=user)\n return \"https://%s/hooks/%s/%s-%s-%s/\" % (Site.objects.get_current().domain, token, self.workflow.id, self.worker.id, self.param_list.id)\n\n class Meta():\n verbose_name = _(\"Hook\")\n verbose_name_plural = _(\"Hooks\")\n db_table = 'threebot_hook'\n unique_together = (\"workflow\", \"worker\", \"param_list\")\n\n\nclass HookSignal(dispatch.Signal):\n pass\n\npre_hook_signal = HookSignal()\npost_hook_signal = HookSignal()\n", "sub_path": "threebot_hook/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "django.db.models.Model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "threebot.models.Workflow", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "threebot.models.Worker", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "threebot.models.ParameterList", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.authtoken.models.Token.objects.get_or_create", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.models.Token.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.models.Token", "line_number": 36, "usage_type": "name"}, {"api_name": "django.contrib.sites.models.Site.objects.get_current", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.sites.models.Site.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 37, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 40, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 41, "usage_type": "call"}, {"api_name": "django.utils.encoding.python_2_unicode_compatible", 
"line_number": 15, "usage_type": "name"}, {"api_name": "django.dispatch.Signal", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.dispatch", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "150282420", "text": "import re\n\nfrom bs4 import BeautifulSoup\n\n\nclass KaggleWord2VecUtility(object):\n \"\"\"KaggleWord2VecUtility is a utility class for processing raw HTML text into segments for further learning\"\"\"\n\n @staticmethod\n def review_to_wordlist( review, remove_stopwords=False ):\n # Function to convert a document to a sequence of words,\n # optionally removing stop words. Returns a list of words.\n #\n # 1. Remove HTML\n review_text = BeautifulSoup(review, features=\"html.parser\").get_text()\n #\n # 2. Remove non-letters\n review_text = re.sub(\"[^a-zA-Z]\",\" \", review_text)\n #\n # 3. Convert words to lower case and split them\n words = review_text.lower().split()\n #\n # 5. Return a list of words\n return(words)\n", "sub_path": "code/perceptron/KaggleWord2VecUtility.py", "file_name": "KaggleWord2VecUtility.py", "file_ext": "py", "file_size_in_byte": 802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "420729061", "text": "from base64 import urlsafe_b64encode\nfrom uuid import uuid4\nfrom typing import List, Optional, Union\nimport httpx\nfrom lnbits.db import open_ext_db\nfrom lnbits.settings import WALLET\nfrom .models import Products, Orders, Indexers\nimport re\n\nregex = re.compile(\n r\"^(?:http|ftp)s?://\" # http:// or https://\n r\"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|\"\n r\"localhost|\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\"\n r\"(?::\\d+)?\"\n r\"(?:/?|[/?]\\S+)$\",\n re.IGNORECASE,\n)\n\n###Products\n\n\ndef create_diagonalleys_product(\n *,\n wallet_id: str,\n product: str,\n categories: str,\n description: str,\n image: str,\n price: int,\n quantity: int,\n) -> Products:\n with open_ext_db(\"diagonalley\") as db:\n product_id = urlsafe_b64encode(uuid4().bytes_le).decode(\"utf-8\")\n db.execute(\n \"\"\"\n INSERT INTO diagonalley.products (id, wallet, product, categories, description, image, price, quantity)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\",\n (\n product_id,\n wallet_id,\n product,\n categories,\n description,\n image,\n price,\n quantity,\n ),\n )\n\n return get_diagonalleys_product(product_id)\n\n\ndef update_diagonalleys_product(product_id: str, **kwargs) -> Optional[Indexers]:\n q = \", \".join([f\"{field[0]} = ?\" for field in kwargs.items()])\n\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n f\"UPDATE diagonalley.products SET {q} WHERE id = ?\",\n (*kwargs.values(), product_id),\n )\n row = db.fetchone(\n \"SELECT * FROM diagonalley.products WHERE id = ?\", (product_id,)\n )\n\n return get_diagonalleys_indexer(product_id)\n\n\ndef get_diagonalleys_product(product_id: str) -> Optional[Products]:\n with open_ext_db(\"diagonalley\") as db:\n row = db.fetchone(\n \"SELECT * FROM diagonalley.products WHERE id = ?\", (product_id,)\n )\n\n return Products(**row) if row else None\n\n\ndef get_diagonalleys_products(wallet_ids: Union[str, List[str]]) -> List[Products]:\n if isinstance(wallet_ids, str):\n wallet_ids = [wallet_ids]\n\n with open_ext_db(\"diagonalley\") as db:\n q = \",\".join([\"?\"] * len(wallet_ids))\n rows = db.fetchall(\n f\"SELECT 
* FROM diagonalley.products WHERE wallet IN ({q})\", (*wallet_ids,)\n )\n\n return [Products(**row) for row in rows]\n\n\ndef delete_diagonalleys_product(product_id: str) -> None:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\"DELETE FROM diagonalley.products WHERE id = ?\", (product_id,))\n\n\n###Indexers\n\n\ndef create_diagonalleys_indexer(\n wallet_id: str,\n shopname: str,\n indexeraddress: str,\n shippingzone1: str,\n shippingzone2: str,\n zone1cost: int,\n zone2cost: int,\n email: str,\n) -> Indexers:\n with open_ext_db(\"diagonalley\") as db:\n indexer_id = urlsafe_b64encode(uuid4().bytes_le).decode(\"utf-8\")\n db.execute(\n \"\"\"\n INSERT INTO diagonalley.indexers (id, wallet, shopname, indexeraddress, online, rating, shippingzone1, shippingzone2, zone1cost, zone2cost, email)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\",\n (\n indexer_id,\n wallet_id,\n shopname,\n indexeraddress,\n False,\n 0,\n shippingzone1,\n shippingzone2,\n zone1cost,\n zone2cost,\n email,\n ),\n )\n return get_diagonalleys_indexer(indexer_id)\n\n\ndef update_diagonalleys_indexer(indexer_id: str, **kwargs) -> Optional[Indexers]:\n q = \", \".join([f\"{field[0]} = ?\" for field in kwargs.items()])\n\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n f\"UPDATE diagonalley.indexers SET {q} WHERE id = ?\",\n (*kwargs.values(), indexer_id),\n )\n row = db.fetchone(\n \"SELECT * FROM diagonalley.indexers WHERE id = ?\", (indexer_id,)\n )\n\n return get_diagonalleys_indexer(indexer_id)\n\n\ndef get_diagonalleys_indexer(indexer_id: str) -> Optional[Indexers]:\n with open_ext_db(\"diagonalley\") as db:\n roww = db.fetchone(\n \"SELECT * FROM diagonalley.indexers WHERE id = ?\", (indexer_id,)\n )\n try:\n x = httpx.get(roww[\"indexeraddress\"] + \"/\" + roww[\"ratingkey\"])\n if x.status_code == 200:\n print(x)\n print(\"poo\")\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n \"UPDATE diagonalley.indexers SET online = ? WHERE id = ?\",\n (\n True,\n indexer_id,\n ),\n )\n else:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n \"UPDATE diagonalley.indexers SET online = ? WHERE id = ?\",\n (\n False,\n indexer_id,\n ),\n )\n except:\n print(\"An exception occurred\")\n with open_ext_db(\"diagonalley\") as db:\n row = db.fetchone(\n \"SELECT * FROM diagonalley.indexers WHERE id = ?\", (indexer_id,)\n )\n return Indexers(**row) if row else None\n\n\ndef get_diagonalleys_indexers(wallet_ids: Union[str, List[str]]) -> List[Indexers]:\n if isinstance(wallet_ids, str):\n wallet_ids = [wallet_ids]\n\n with open_ext_db(\"diagonalley\") as db:\n q = \",\".join([\"?\"] * len(wallet_ids))\n rows = db.fetchall(\n f\"SELECT * FROM diagonalley.indexers WHERE wallet IN ({q})\", (*wallet_ids,)\n )\n\n for r in rows:\n try:\n x = httpx.get(r[\"indexeraddress\"] + \"/\" + r[\"ratingkey\"])\n if x.status_code == 200:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n \"UPDATE diagonalley.indexers SET online = ? WHERE id = ?\",\n (\n True,\n r[\"id\"],\n ),\n )\n else:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n \"UPDATE diagonalley.indexers SET online = ? 
WHERE id = ?\",\n (\n False,\n r[\"id\"],\n ),\n )\n except:\n print(\"An exception occurred\")\n with open_ext_db(\"diagonalley\") as db:\n q = \",\".join([\"?\"] * len(wallet_ids))\n rows = db.fetchall(\n f\"SELECT * FROM diagonalley.indexers WHERE wallet IN ({q})\", (*wallet_ids,)\n )\n return [Indexers(**row) for row in rows]\n\n\ndef delete_diagonalleys_indexer(indexer_id: str) -> None:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\"DELETE FROM diagonalley.indexers WHERE id = ?\", (indexer_id,))\n\n\n###Orders\n\n\ndef create_diagonalleys_order(\n *,\n productid: str,\n wallet: str,\n product: str,\n quantity: int,\n shippingzone: str,\n address: str,\n email: str,\n invoiceid: str,\n paid: bool,\n shipped: bool,\n) -> Indexers:\n with open_ext_db(\"diagonalley\") as db:\n order_id = urlsafe_b64encode(uuid4().bytes_le).decode(\"utf-8\")\n db.execute(\n \"\"\"\n INSERT INTO diagonalley.orders (id, productid, wallet, product, quantity, shippingzone, address, email, invoiceid, paid, shipped)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\",\n (\n order_id,\n productid,\n wallet,\n product,\n quantity,\n shippingzone,\n address,\n email,\n invoiceid,\n False,\n False,\n ),\n )\n\n return get_diagonalleys_order(order_id)\n\n\ndef get_diagonalleys_order(order_id: str) -> Optional[Orders]:\n with open_ext_db(\"diagonalley\") as db:\n row = db.fetchone(\"SELECT * FROM diagonalley.orders WHERE id = ?\", (order_id,))\n\n return Orders(**row) if row else None\n\n\ndef get_diagonalleys_orders(wallet_ids: Union[str, List[str]]) -> List[Orders]:\n if isinstance(wallet_ids, str):\n wallet_ids = [wallet_ids]\n\n with open_ext_db(\"diagonalley\") as db:\n q = \",\".join([\"?\"] * len(wallet_ids))\n rows = db.fetchall(\n f\"SELECT * FROM diagonalley.orders WHERE wallet IN ({q})\", (*wallet_ids,)\n )\n for r in rows:\n PAID = (await WALLET.get_invoice_status(r[\"invoiceid\"])).paid\n if PAID:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\n \"UPDATE diagonalley.orders SET paid = ? 
WHERE id = ?\",\n (\n True,\n r[\"id\"],\n ),\n )\n rows = db.fetchall(\n f\"SELECT * FROM diagonalley.orders WHERE wallet IN ({q})\",\n (*wallet_ids,),\n )\n return [Orders(**row) for row in rows]\n\n\ndef delete_diagonalleys_order(order_id: str) -> None:\n with open_ext_db(\"diagonalley\") as db:\n db.execute(\"DELETE FROM diagonalley.orders WHERE id = ?\", (order_id,))\n", "sub_path": "lnbits/extensions/diagonalley/crud.py", "file_name": "crud.py", "file_ext": "py", "file_size_in_byte": 9564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 33, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 34, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Products", "line_number": 32, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 58, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "models.Indexers", "line_number": 55, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 71, "usage_type": "call"}, {"api_name": "models.Products", "line_number": 76, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 70, "usage_type": "name"}, {"api_name": "models.Products", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Products", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Products", "line_number": 79, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 93, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 110, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 111, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 111, "usage_type": "call"}, {"api_name": "models.Indexers", "line_number": 109, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 137, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 134, "usage_type": "name"}, {"api_name": "models.Indexers", "line_number": 134, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 150, "usage_type": "call"}, {"api_name": "httpx.get", "line_number": 155, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 159, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 168, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 178, "usage_type": "call"}, {"api_name": "models.Indexers", "line_number": 182, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 149, "usage_type": "name"}, {"api_name": "models.Indexers", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 185, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 185, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 189, "usage_type": "call"}, {"api_name": "httpx.get", "line_number": 197, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 199, 
"usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 208, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 218, "usage_type": "call"}, {"api_name": "models.Indexers", "line_number": 223, "usage_type": "call"}, {"api_name": "models.Indexers", "line_number": 185, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 227, "usage_type": "call"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 247, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 248, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 248, "usage_type": "call"}, {"api_name": "models.Indexers", "line_number": 246, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 273, "usage_type": "call"}, {"api_name": "models.Orders", "line_number": 276, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 272, "usage_type": "name"}, {"api_name": "models.Orders", "line_number": 272, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 279, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 279, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 283, "usage_type": "call"}, {"api_name": "lnbits.settings.WALLET.get_invoice_status", "line_number": 289, "usage_type": "call"}, {"api_name": "lnbits.settings.WALLET", "line_number": 289, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 291, "usage_type": "call"}, {"api_name": "models.Orders", "line_number": 303, "usage_type": "call"}, {"api_name": "models.Orders", "line_number": 279, "usage_type": "name"}, {"api_name": "lnbits.db.open_ext_db", "line_number": 307, "usage_type": "call"}]} +{"seq_id": "571757147", "text": "import pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport itertools\n\n# iterate through events, keeping track of previously seen\n# For videos, we need to tag\n# 1) if they are being viewed out of the intended order,\n# 2) if they have already been seen\n# 3) if they are being immediately reviewed\n# 4) if they are being paused, and if so are they being paused multiple times\n# 5) if their playback rate is being changed, and if so is it being changed multiple times\nclass LectureView:\n\t\"\"\"Processes lecture views, keeping track of last seen, etc\"\"\"\n\tdef __init__(self):\n\t\tself.seen = []\n\n\tdef proc_sequence(self, row):\n\t\tl = row['lecture_id']\n\t\tif self.seen == []:\n\t\t\tself.seen.append(l)\n\t\t\treturn None # no tag\n\t\tif l == self.seen[-1]:\n\t\t\treturn 'immediate-review'\n\t\tif l in self.seen:\n\t\t\tself.seen.append(l)\n\t\t\treturn 'seen-before'\n\t\tif l-1 in self.seen: # immediately following previously seen value, no tag\n\t\t\tself.seen.append(l)\n\t\t\treturn 'normal'\n\t\t# end of the list, if it hasn't been caught already, should mean out-of-sequence\n\t\tself.seen.append(l)\n\t\treturn 'out-of-sequence'\n\n\tdef proc(self, row):\n\t\tseq = self.proc_sequence(row)\n\t\ttags = []\n\t\tif seq:\n\t\t\ttags.append(seq)\n\t\tfor x in ['seeked', 'pause', 'ratechange']:\n\t\t\tif x in row:\n\t\t\t\ttags.append(x)\n\t\treturn tags\n\ndef dispatch(vals):\n\tproc = LectureView()\n\treturn [proc.proc(v) for v in vals]\n\tif action in handlers.keys():\n\t\treturn(handlers[action][val])\n\telse:\n\t\treturn ''\n\ndef join_value(frame, store, values):\n\tfor val in values:\n\t\tval_series = store[val].to_frame().reset_index().\\\n\t\t\trename(columns={'index':val + '_val', 0: 
val})\n\t\tval_series = val_series.set_index(val)\n\t\tframe = frame.set_index(val)\n\t\tframe = frame.join(val_series, how='outer')\n\t\tframe = frame.reset_index()\n\treturn(frame)\n\ndef get_index_by_value(pdseries, value):\n\t\"Looks up Pandas.Series index by value\"\n\treturn(pdseries[pdseries == value].index[0])\n\nclass ActionConverter(object):\n\tdef __init__(self, store):\n\t\tself.store = store\n\t\tself.lecture_action = store['action']['lecture/view']\n\t\tself.handlers = {'lecture/view': LectureView}\n\n\tdef convert(self, user, max_time = None):\n\t\tstore = self.store\n\t\tterm = \"username = %s\" % user\n\t\tif max_time:\n\t\t\tterm += \" & index < %d\" % max_time\n\t\tu = store.select('db', pd.Term(term))\n\t\tusername = get_index_by_value(self.store['username'], user)\n\n\t\thandlers = {k:v() for k,v in self.handlers.items()} # initialize handlers\n\n\t\t# checking if there are any events\n\t\tif len(u) == 0:\n\t\t\treturn(None)\n\n\t\tu = u.reset_index()\n\t\tu = u.sort(columns = 'timestamp')\n\t\tu['duration'] = u.timestamp.diff()\n\t\tu.duration = u.duration.shift(-1)\n\t\ttuples = [tuple(x) for x in u[['timestamp', 'action', 'lecture_id']].\n\t\t\t\t to_records(index=False)]\n\t\tvideo_sections = [list(g) for k,g in itertools.\n\t\t\t\t\t\t groupby(tuples, lambda x: (x[1], x[2]))]\n\n\t\tu = join_value(u, store, ['type', 'action'])\n\t\tvideo_events = []\n\t\tu = u.set_index('timestamp')\n\t\tfor segment in video_sections:\n\t\t\t# **************************************************\n\t\t\t# first, reduce multi-line video events to one line and pass through other events\n\t\t\t# into a dict, which we can further process\n\t\t\trows = []\n\t\t\tfor event in segment:\n\t\t\t\trows.append(event[0])\n\n\t\t\tr = u.loc[rows]\n\n\t\t\t# if not a video event, just write it in\n\t\t\tif r.head(1).action.values[0] != self.lecture_action:\n\t\t\t\treduce_dict = r.reset_index().to_dict(outtype='records')[0]\n\n\t\t\t# if a video event, reduce down to one line\n\t\t\telse:\n\t\t\t\treduce_dict = r.type_val.value_counts().to_dict()\n\t\t\t\tduration = r.duration.sum()\n\t\t\t\ttimestamp = r.head(1).index.item()\n\t\t\t\taction_val = r.head(1).action_val.values[0]\n\t\t\t\treduce_dict.update({\"duration\": duration,\n\t\t\t\t\t\t\t\t\t\"lecture_id\": r.head(1).lecture_id.values[0],\n\t\t\t\t\t\t\t\t\t\"action\": self.lecture_action,\n\t\t\t\t\t\t\t\t\t\"username\": user,\n\t\t\t\t\t\t\t\t\t\"timestamp\": timestamp,\n\t\t\t\t\t\t\t\t\t\"action_val\": action_val})\n\n\t\t\t# ************************************************\n\t\t\t# parse video events depending on action_type, and add tags\n\t\t\ttags = []\n\t\t\tif reduce_dict['action_val'] in handlers.keys():\n\t\t\t\ttags = handlers[reduce_dict['action_val']].proc(reduce_dict)\n\t\t\tif reduce_dict['duration'] < 3:\n\t\t\t\ttags.append('short-event')\n\t\t\treduce_dict['tags'] = tags\n\n\t\t\tvideo_events.append(reduce_dict)\n\n\t\t# **************************************************\n\t\t# Formatting for arules-sequence\n\n\t\ttxt = ''\n\t\tfor event in video_events:\n\t\t\ttagcnt = 1\n\t\t\tif 'tags' in event:\n\t\t\t\ttagcnt += len(event['tags'])\n\t\t\t\ttags = ' '.join(event['tags'])\n\t\t\telse:\n\t\t\t\ttags = ''\n\t\t\tline = \"%s %s %d %s %s\" % (username, event['timestamp'], tagcnt, event['action_val'], tags)\n\t\t\ttxt += line + \"\\n\"\n\n\t\treturn (txt)\n", "sub_path": "prep_sequences/action_converter.py", "file_name": "action_converter.py", "file_ext": "py", "file_size_in_byte": 4646, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "pandas.Term", "line_number": 78, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "92886576", "text": "from os import mkdir\nfrom os.path import isdir\nimport csv\nimport json\n\n# Naked - Used to run node calls for d3 scripts\nfrom Naked.toolshed.shell import muterun_js\n\n# for making curl requests\nimport requests\n\n## SOME CONSTANTS\n## town profile data endpoints\nendpoints = {\n \"town\" : \"http://profiles.ctdata.org/profiles/api/v1/data/town\",\n \"county\" : \"http://profiles.ctdata.org/profiles/api/v1/data/county\",\n \"state\" : \"http://profiles.ctdata.org/profiles/api/v1/data/state\",\n \"pdf\" : \"http://192.168.33.101/download\"\n}\n\n\n# This will make sure all the output directories exist\n# town, county, and state subdirectories of /data will contain the data that comes directly from the API\n# /data/requests will contain JSON output of intermediary script that reshapes data from API -> Request format\nif not isdir(\"data\"):\n mkdir(\"data\")\n\nif not isdir(\"data/town\"):\n mkdir(\"data/town\")\n\nif not isdir(\"data/county\"):\n mkdir(\"data/county\")\n\nif not isdir(\"data/state\"):\n mkdir(\"data/state\")\n\nif not isdir(\"data/requests\"):\n mkdir(\"data/requests\")\n\n# /pdfs will contain the final pdfs after they are returned from the reports server\nif not isdir(\"pdfs\"):\n mkdir(\"pdfs\")\n\n\n# Reads CSV file (in this directory) and gets list of towns with their counties.\nwith open(\"towns.csv\", \"r\") as townListFile:\n townListReader = csv.DictReader(townListFile)\n towns = [town for town in townListReader]\n\n###\n# If you need to run a single town, or a small set of towns,\n# you can manually create a list of them as such:\n###\n\n###### A single Town\n#\n# towns = [\n# # {'Town': 'Suffield', 'County': 'Hartford County', 'State': 'Connecticut'},\n# {'Town': 'Bethany', 'County': 'New Haven County', 'State': 'Connecticut'}\n# ]\n\n###### or a few Towns\n\n# towns = [\n# {'Town' : 'Bristol','County' : 'Hartford County','State' : 'Connecticut'},\n# {'Town' : 'Cornwall','County' : 'Litchfield County','State' : 'Connecticut'},\n# {'Town' : 'Enfield','County' : 'Hartford County','State' : 'Connecticut'},\n# ]\n\n\n# get all town data\nfor town in set([town[\"Town\"] for town in towns]):\n print(\"Getting data for \"+town)\n try:\n townData = requests.get(\"/\".join([endpoints[\"town\"], town]))\n\n # save to file\n with open(\"data/town/\"+town+\".json\", \"w\") as townDataFile:\n json.dump(townData.json(), townDataFile)\n except:\n print(\"Error!\")\n print(\"/\".join([endpoints[\"town\"], town]))\n\n# get all county data\nfor county in set([town[\"County\"] for town in towns]):\n print(\"Getting data for \"+county)\n try:\n countyData = requests.get(\"/\".join([endpoints[\"county\"], county]))\n\n # save to file\n with open(\"data/county/\"+county+\".json\", \"w\") as countyDataFile:\n json.dump(countyData.json(), countyDataFile)\n except:\n print(\"Error!\")\n print(\"/\".join([endpoints[\"county\"], county]))\n\n# get all state data\nfor state in set([town[\"State\"] for town in towns]):\n print(\"Getting data for \"+state)\n try:\n stateData = requests.get(\"/\".join([endpoints[\"state\"], state]))\n\n # save to file\n with open(\"data/state/\"+state+\".json\", \"w\") as stateDataFile:\n json.dump(stateData.json(), stateDataFile)\n except:\n print(\"Error!\")\n print(\"/\".join([endpoints[\"state\"], 
state]))\n\n# Send data through api2pdf node script\nfor town in towns:\n print(\"Converting to PDF request for \"+town[\"Town\"]+\", \"+town[\"County\"]+\", \"+town[\"State\"])\n townFlag = \"--town='./data/town/\"+town[\"Town\"]+\".json'\"\n countyFlag = \"--county='./data/county/\"+town[\"County\"]+\".json'\"\n stateFlag = \"--state='./data/state/\"+town[\"State\"]+\".json'\"\n\n jsonRequest = muterun_js(\"api2pdf.js\", \" \".join([townFlag, countyFlag, stateFlag]))\n\n try:\n with open(\"data/requests/\" + town[\"Town\"] + \".json\", \"w\") as requestOutputFile:\n json.dump(json.loads(jsonRequest.stdout), requestOutputFile)\n except Exception as e:\n print(\"Error!\")\n print(e)\n print(\" \".join([townFlag, countyFlag, stateFlag]))\n\n# Get pdfs!\nfor town in towns:\n print(\"Creating PDF for \"+town[\"Town\"]+\", \"+town[\"County\"]+\", \"+town[\"State\"])\n try:\n with open(\"data/requests/\" + town[\"Town\"] + \".json\", \"r\") as requestFile:\n request = {\"data\" : json.dumps(json.load(requestFile))}\n\n pdf = requests.get(endpoints[\"pdf\"], data=request)\n\n with open(\"pdfs/\" + town[\"Town\"] + \".pdf\", \"wb\") as pdfFile:\n pdfFile.write(pdf.content)\n except Exception as e:\n print(\"Error!\")\n print(town)\n print(e)\n", "sub_path": "profiles/make_pdfs.py", "file_name": "make_pdfs.py", "file_ext": "py", "file_size_in_byte": 4598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "os.path.isdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 75, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 88, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 92, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 105, "usage_type": "call"}, {"api_name": "Naked.toolshed.shell.muterun_js", "line_number": 117, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 121, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 132, "usage_type": "call"}, {"api_name": "json.load", "line_number": 132, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "479323840", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Salary, Employee\nfrom .forms import AddForm\nfrom django.contrib import messages\n\n\n# Create your views here.\ndef salary_view(request):\n if not request.user.is_authenticated():\n 
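# not authenticated: bounce the visitor back to the home page\n        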
return redirect('home')\n\n    employee_list = Employee.objects.filter(is_active=True)\n    salary_list = Salary.objects.all()\n    # salary = get_object_or_404(Salary)\n\n    # form = AddForm(request.POST or None, instance=salary)\n\n    form = AddForm(request.POST or None)\n    if form.is_valid():\n        salary = form.save(commit=False)\n        salary.s_voter = request.user\n        salary.save()\n        messages.success(request, \"Başarılı bir şekilde oluşturdunuz.\", extra_tags='mesaj-basarili')\n        # return HttpResponseRedirect(fault.get_absolute_url()) # TODO\n        # return reverse('fault:index', {})\n        return redirect('home')\n\n    context = {\n        'salary_list': salary_list,\n        'employee_list': employee_list,\n        'form': form\n    }\n\n    return render(request, 'salary/index.html', context)\n\n\ndef add_view(request):\n    if not request.user.is_authenticated():\n        return redirect('home')\n\n    form = AddForm(request.POST or None)\n    if form.is_valid():\n        salary = form.save(commit=False)\n        salary.s_voter = request.user\n        salary.save()\n        messages.success(request, \"Başarılı bir şekilde oluşturdunuz.\", extra_tags='mesaj-basarili')\n        # return HttpResponseRedirect(fault.get_absolute_url()) # TODO\n        # return reverse('fault:index', {})\n        return redirect('home')\n\n    context = {\n        'form': form\n    }\n\n    return render(request, \"salary/add.html\", context)\n\n", "sub_path": "salary/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Employee.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Employee.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Employee", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Salary.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Salary.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Salary", "line_number": 13, "usage_type": "name"}, {"api_name": "forms.AddForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "forms.AddForm", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "217615809", "text": "# Import dash-core, dash-html, dash io, bootstrap\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\n# Dash Bootstrap components\nimport dash_bootstrap_components as dbc\n\n# Navbar, layouts, custom callbacks\nfrom navbar import Navbar\nfrom layouts import bancoLayout, reslayout\n#, spsLayout\nimport callbacks\n\n# Import app\nfrom app import app\n# Import server for deployment\nfrom app import server as server\n\n\n# Layout 
variables, navbar, header, content, and container\nnav = Navbar()\n\nheader = dbc.Row(\n    dbc.Col(\n        html.Div([\n            # html.H2(children='Estudios Nacionales de Calidad'),\n            # html.H3(children='Encuesta Nacional de Imagen del IMSS 2021')\n        ])\n        \n    ),className='banner')\n\ncontent = html.Div([\n    dcc.Location(id='url'),\n    html.Div(id='page-content')\n])\n\ncontainer = dbc.Container([\n    header,\n    content,\n])\n\n\n# Menu callback, set and return\n# Declare the function that connects other pages with content to container\n@app.callback(Output('page-content', 'children'),\n              [Input('url', 'pathname')])\ndef display_page(pathname):\n    if pathname == '/res':\n        return reslayout\n    # if pathname == '/':\n    #     return html.Div([\n    #         html.Iframe(src='assets/SP.html', width='100%', height='1400', \n    #              style={'background': '13322B', 'border':'none'},\n    #              )\n    #     ],className='home')\n    elif pathname == '/banco':\n        return bancoLayout\n    # elif pathname == '/antecedente':\n    #     return spsLayout\n    # elif pathname == '/res':\n    #     return reslayout\n    else:\n        return 'Relax!: Página en construcción!'\n\n\n# Main index function that will call and return all layout variables\ndef index():\n    layout = html.Div([\n        nav,\n        container\n    ])\n    return layout\n\n# Set layout to index function\napp.layout = index()\n\n# Call app server\nif __name__ == '__main__':\n    # set debug to false when deploying app\n    app.run_server()\n    #app.run_server('localhost', port=8050, debug=True)\n\n\n\n", "sub_path": "index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 2096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "navbar.Navbar", "line_number": 22, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 24, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 25, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 26, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 33, "usage_type": "call"}, {"api_name": "dash_core_components.Location", "line_number": 34, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 35, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Container", "line_number": 38, "usage_type": "call"}, {"api_name": "layouts.reslayout", "line_number": 50, "usage_type": "name"}, {"api_name": "layouts.bancoLayout", "line_number": 58, "usage_type": "name"}, {"api_name": "app.app.callback", "line_number": 46, "usage_type": "call"}, {"api_name": "app.app", "line_number": 46, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 46, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 47, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 69, "usage_type": "call"}, {"api_name": "app.app.layout", "line_number": 76, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 76, "usage_type": "name"}, {"api_name": "app.app.run_server", "line_number": 81, "usage_type": "call"}, {"api_name": "app.app", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "158199797", "text": "# File: scrub.py\n# Author: Jay Oliver\n# Date Created: 13/03/2020\n# Last Modified: 21/04/2020\n# Purpose: This file contains all functions that relate to web page\n#          scrubbing\n# Comments: More than one scrubbing function may be created so as to\n#           allow the scrubbing of different stats\n#\n#           The structure of the data that is parsed from the webpage 
(in the\n# data list) is:\n# Lewis Gun # weapon name\n# LMG # weapon class\n# 737 # kills\n# 1.48 # kpm\n# 8h 19m 50s # time played\n# 21,624 # shots fired\n# 4,260 # shots hit\n# 19.70 # accuracy\n# 169 # headshots\n# ...next weapon\n# Note that this may not hold true if more stats are presented on\n# the webpage\n\nfrom re import search\nfrom bs4 import BeautifulSoup\n\n\ndef weaps(page):\n \"\"\"Scrub and return the stats present on battlefield tracker webpage.\n\n\n This function scrubs the weapons page of\n battlefieldtracker.com for the stats on all the listed weapons\n and returns them in a list.\n\n parameters:\n - page: This is the response from the requests.get()\n method which contains all the html of the site\n returns:\n - data: This is a list containing the stats for each weapon.\n The order of the elements is given in the comment\n of this file.\n \"\"\"\n # these are entries that appear in the final parse (the data list) and need\n # to be removed\n bad_ent = [\"Search Profile\", \"Search\", \"Home\", \"My Profile\",\n \"Leaderboards\", \"Challenges\", \"More\", \"Link Profile\",\n \"Score/min\", \"K/D\", \"Rank\", \"Win %\", \"Kills\",\n \"Kills/min\", \"Time Played\", \"Shots Fired\", \"Shots Hit\",\n \"Shots Accuracy\", \"Headshots\", \"--\", \"Premium\"]\n soup = BeautifulSoup(page.content, \"html.parser\")\n parsed_soup = [str(i) for i in soup.find_all(\"span\", [\"name\", \"sub\"])]\n data = [search(r\">.*<\", i).group(0)[1:-1]\n for i\n in parsed_soup\n if search(r\">.*<\", i).group(0)[1:-1] not in bad_ent]\n\n return data\n\n\ndef overview(page):\n \"\"\"Scrub and return stats from the overview page on battlefield.tracker.com\n\n parameters:\n - page: This is the response from the requests.get()\n method which contains all the html of the site\n - returns:\n - data: a list containing the parsed data from the html\n \"\"\"\n bad_ent = [\"Search Profile\", \"Search\", \"Home\", \"My Profile\",\n \"Leaderboards\", \"Challenges\", \"More\", \"Link Profile\", \"Premium\"]\n soup = BeautifulSoup(page.content, \"html.parser\")\n parsed_soup = [str(i) for i in soup.find_all(\"span\", [\"name\", \"value\",\n \"playtime\"])]\n data = [search(r\">.*<\", i).group(0)[1:-1]\n for i\n in parsed_soup\n if search(r\">.*<\", i).group(0)[1:-1] not in bad_ent\n and \"viewbox\" not in search(r\">.*<\", i).group(0)[1:-1]]\n\n return data\n", "sub_path": "battlefield/scrub.py", "file_name": "scrub.py", "file_ext": "py", "file_size_in_byte": 3186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 52, "usage_type": "call"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 57, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "re.search", "line_number": 76, "usage_type": "call"}, {"api_name": "re.search", "line_number": 79, "usage_type": "call"}, {"api_name": "re.search", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "590410632", "text": "\"\"\"Stat tracker for steemd API call performance.\"\"\"\n\nimport atexit\nfrom hive.utils.system import colorize, peak_usage_mb\n\n# pylint: disable=missing-docstring\n\nclass ClientStats:\n \"\"\"Collects steemd API timing data.\"\"\"\n\n # Assumed HTTP overhead (ms); subtract prior to par check\n PAR_HTTP_OVERHEAD = 75\n\n # Reporting threshold (x * par)\n PAR_THRESHOLD = 1.1\n\n # Thresholds for critical call timing 
(ms)\n PAR_STEEMD = {\n 'get_dynamic_global_properties': 20,\n 'get_block': 50,\n 'get_blocks_batch': 5,\n 'get_accounts': 3,\n 'get_content': 4,\n 'get_order_book': 20,\n 'get_feed_history': 20,\n }\n\n stats = {}\n ttltime = 0.0\n fastest = None\n\n @classmethod\n def log(cls, method, ms, batch_size=1):\n cls.add_to_stats(method, ms, batch_size)\n cls.check_timing(method, ms, batch_size)\n if cls.fastest is None or ms < cls.fastest:\n cls.fastest = ms\n if cls.ttltime > 30 * 60 * 1000:\n cls.print()\n\n @classmethod\n def add_to_stats(cls, method, ms, batch_size):\n if method not in cls.stats:\n cls.stats[method] = [ms, batch_size]\n else:\n cls.stats[method][0] += ms\n cls.stats[method][1] += batch_size\n cls.ttltime += ms\n\n @classmethod\n def check_timing(cls, method, ms, batch_size):\n if method == 'get_block' and batch_size > 1:\n method = 'get_blocks_batch'\n per = int((ms - cls.PAR_HTTP_OVERHEAD) / batch_size)\n par = cls.PAR_STEEMD[method]\n over = per / par\n if over >= cls.PAR_THRESHOLD:\n out = (\"[STEEM][%dms] %s[%d] -- %.1fx par (%d/%d)\"\n % (ms, method, batch_size, over, per, par))\n print(colorize(out))\n\n @classmethod\n def print(cls):\n if not cls.stats:\n return\n ttl = cls.ttltime\n print(\"[STATS] sampled steem time: {}s\".format(int(ttl / 1000)))\n for arr in sorted(cls.stats.items(), key=lambda x: -x[1][0])[0:40]:\n sql, vals = arr\n ms, calls = vals\n print(\"% 5.1f%% % 9sms % 7.2favg % 8dx -- %s\"\n % (100 * ms/ttl, \"{:,}\".format(int(ms)),\n ms/calls, calls, sql[0:180]))\n print(\"[STATS] fastest steem call was %.3fms\" % cls.fastest)\n print(\"[STATS] peak memory usage: %.2fMB\" % peak_usage_mb())\n cls.clear()\n\n @classmethod\n def clear(cls):\n \"\"\"Reset accumulators\"\"\"\n cls.stats = {}\n cls.ttltime = 0\n\natexit.register(ClientStats.print)\n", "sub_path": "hive/steem/stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 2577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "hive.utils.system.colorize", "line_number": 60, "usage_type": "call"}, {"api_name": "hive.utils.system.peak_usage_mb", "line_number": 75, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "324982246", "text": "#! 
/usr/bin/python3\nimport psycopg2\nimport sys\n\ndef times(dbname):\n\tcon = psycopg2.connect(dbname)\n\tcur = con.cursor()\n\tcur.execute(\"select count(*) from combatant\")\n\tcount = cur.fetchall()[0][0]\n\tmost = [0, 0] \n\tleast = [99999, 0]\n\tfor i in range(1, count+1):\n\t\tcur.execute(\"select start, finish from fight where combatant_one = %s or combatant_two = %s\",( i, i))\n\t\tfights = cur.fetchall()\n\t\ttime_spent = 0\n\t\tfor a in fights:\n\t\t\ta = a[1] - a[0]\n\t\t\ttime_spent += a.total_seconds()\n\t\tif time_spent > most[0]:\n\t\t\tmost[0] = time_spent\n\t\t\tmost[1] = i\n\t\tif time_spent < least[0]:\n\t\t\tleast[0] = time_spent\n\t\t\tleast[1] = i\n\tcon.close()\n\tprint(most[1], \"fought the longest at\", most[0], \"seconds\")\n\tprint(least[1], \"fought the least at\", least[0], \"seconds\")\n\n\ndef fights(dbname):\n\tcon = psycopg2.connect(dbname)\n\tcur = con.cursor()\n\tcur.execute(\"select count(*) from combatant\")\n\tcount = cur.fetchall()[0][0]\n\tmost = [0, 0]\n\tleast = [0, 0]\n\tfor i in range(1, count+1):\n\t\tcur.execute(\"select count(*) from fight where 'One' = winner and combatant_one = %s or 'Two' = winner and combatant_two = %s\",( i, i))\n\t\twins = cur.fetchall()[0][0]\n\t\tif wins > most[0]:\n\t\t\tmost[0] = wins\n\t\t\tmost[1] = i\n\t\tcur.execute(\"select count(*) from fight where 'Two' = winner and combatant_one = %s or 'One' = winner and combatant_two = %s\",( i, i))\n\t\tlosses = cur.fetchall()[0][0]\n\t\tif losses > least[0]:\n\t\t\tleast[0] = losses\n\t\t\tleast[1] = i\n\tcon.close()\n\tprint(\"most wins and most losses go to\")\n\tprint(most[1], \"won\", most[0], \"times\")\n\tprint(least[1], \"lost\", least[0], \"times\")\n\n\t\ndef attacks(dbname):\n\tcon = psycopg2.connect(dbname)\n\tcur = con.cursor()\n\tcur.execute(\"select count(*) from species\")\n\tcount = cur.fetchall()[0][0]\n\tmost = [0, 0]\n\tfor i in range(1, count+1):\n\t\tcur.execute(\"select count(*) from species_attack where species_id = %s\",(i,))\n\t\tattack_count = cur.fetchall()[0][0]\n\t\tif attack_count > most[0]:\n\t\t\tmost[0] = attack_count\n\t\t\tmost[1] = i\n\tcon.close()\n\tprint(most[1], \"has the most attacks at\", most[0])\n\ndef species(dbname):\n\tcon = psycopg2.connect(dbname)\n\tcur = con.cursor()\n\tcur.execute(\"select count(*) from combatant\")\n\tcount = cur.fetchall()[0][0]\n\tfighters = []\n\tspecies_list = {}\n\tfor i in range(1, count+1):\n\t\tcur.execute(\"select count(*) from fight where 'One' = winner and combatant_one = %s or 'Two' = winner and combatant_two = %s\",( i, i))\n\t\tfighters.append([])\n\t\tfighters[i-1].append(i)\n\t\tfighters[i-1].append(cur.fetchall()[0][0])\n\t\tcur.execute(\"select count(*) from fight where 'Two' = winner and combatant_one = %s or 'One' = winner and combatant_two = %s\",( i, i))\n\t\tfighters[i-1].append(cur.fetchall()[0][0])\n\t\tcur.execute(\"select count(*) from fight where 'Tie' = winner and combatant_one = %s or 'Tie' = winner and combatant_two = %s\",( i, i))\n\t\tfighters[i-1].append(cur.fetchall()[0][0])\n\tfor fighter in fighters:\n\t\tcur.execute(\"select species_id from combatant where id = %s\", (fighter[0],))\n\t\tcur.execute(\"select name from species where id = %s\", (cur.fetchall()[0][0],))\n\t\tret_val = cur.fetchall()[0][0]\n\t\tif ret_val in species_list.keys():\n\t\t\tspecies_list[ret_val][0].append(fighter[0])\n\t\t\tspecies_list[ret_val][1] += fighter[1]\n\t\t\tspecies_list[ret_val][2] += fighter[2]\n\t\t\tspecies_list[ret_val][3] += fighter[3]\n\t\telse:\n\t\t\tspecies_list[ret_val] = 
[[fighter[0]], fighter[1], fighter[2], fighter[3]]\n\tcon.close()\n\twin, loss, draw = \"wins\", \"losses\", \"draws\"\n\tfor i in species_list:\n\t\tif species_list[i][1] == 1:\n\t\t\twin = \"win\"\n\t\telse:\n\t\t\twin = \"wins\"\n\t\tif species_list[i][2] == 1:\n\t\t\tloss = \"loss\"\n\t\telse:\n\t\t\tloss = \"losses\"\n\t\tif species_list[i][3] == 1:\n\t\t\tdraw = \"draw\"\n\t\telse:\n\t\t\tdraw = \"draws\"\n\t\tprint(\"%s\\t\\t %d %s\\t%d %s\\t%d %s\" %(i, species_list[i][1], win, species_list[i][2], loss, species_list[i][3], draw))\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\tprint(\"you must supply this program with a name of a database that you can connect to\")\n\t\tprint(\"ex. ./stats.py dbname\")\n\t\texit(1)\n\ttry:\n\t\tdbname = \"dbname=\"+sys.argv[1]\n\t\tcon = psycopg2.connect(dbname)\n\t\tcon.close()\n\texcept:\n\t\tprint(\"unable to connect to database %s, please ensure you have access or permission to this database\" %sys.argv[1])\n\t\texit(1)\n\ttimes(dbname)\n\tfights(dbname)\n\tattacks(dbname)\n\tspecies(dbname)\n\t\nif __name__ == \"__main__\":\n\tmain()\n", "sub_path": "stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 4238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "psycopg2.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 31, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 55, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 123, "usage_type": "attribute"}]} +{"seq_id": "79224922", "text": "import os\nimport sys\nimport json\nimport cp_global\n#import unit_test_common_utils\n#import pp\nimport logging\nimport colorlog\nimport time\nfrom time import strftime, gmtime\nfrom configparser import SafeConfigParser\n\n\n\ncwd = os.getcwd()\nconfig = SafeConfigParser()\nconfig_file = os.getcwd()+\"/unit_suite/conf_tcs/unit_setup.conf\"\n \nstart_time = strftime(\"%Y%m%d%H%M\", gmtime())\n\n# Read the test setup details specific to the test cases.\n\n\ndef init_unit():\n    # TBD : Move this to common framework lib and pass flags like mrc, unit etc.\n    # Depending on flags make logging specific to that test option.\n    logger = get_logger()\n    # Phase 1: MRC Test bed/environment configuration validation.\n    #deploy_test_vms(logger)\n    # Phase 2: We now have a test bed to execute MRC test cases.\n    run_tests()\n\n    # Phase 3: Test Suite execution report.\n    #report_mgmt_update()\n    \n    # Phase 4: Cleanup of test setup.\n    #shiva_the_destructor()\n\n## Common function to start execution.\ndef run_tests():\n    config.read(config_file)\n    tc_dir = cwd+config.get('setup', 'tc_dir')\n    report_dir = config.get('setup', 'report_location')\n    node = config.get('setup', 'node')\n    report_file = os.getcwd()+report_dir+start_time+\".html\"\n    logger.info(\"Running unit test cases\")\n    logger.info(\"test case directory : %s\",tc_dir)\n    ## Calling pytest to execute the test cases\n    os.system(\"python3 -m pytest %s -s -v --tb=line --html=%s -n %s\" % (tc_dir , report_file , node ))\n\n# Currently, logging happens at the test suite level.\n# TBD : TC Level logging and process level logging\ndef get_logger():\n\n    global logger\n    
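# each run writes its own timestamped log file under unit_suite/logs_tcs/\n    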
path = os.getcwd() + \"/unit_suite/logs_tcs/\"\n #from logging.handlers import TimedRotatingFileHandler\n #logger = TimedRotatingFileHandler(LOG_FILE, when=LOG_ROTATION_TIME)\n # __name__ is a better choice as one exactly which module called logger.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n # Unique name for each run of mrc tests.\n nowtime = strftime(\"%Y-%m-%d-%H-%M\", gmtime())\n path = path + \"Unit-Tests\" + nowtime + \".log\"\n global unit_log_name\n unit_log_name = path\n handler = logging.FileHandler(path)\n handler.setLevel(logging.INFO)\n\n # Create a common logging format\n # Return a logger with a default ColoredFormatter.\n # You can also call logger.exception(msg, *args), it equals to logger.error(msg, exc_info=True, *args).\n formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(asctime)s-%(name)s-%(levelname)-4s%(reset)s%(white)s-%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n }\n )\n handler.setFormatter(formatter)\n\n # Add the handlers to the logger\n logger.addHandler(handler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(formatter)\n logger.addHandler(consoleHandler)\n\n logger.info(\"Setting up and Returning MRC test suite logger\")\n return logger\n\n", "sub_path": "unit_suite/lib_tcs/unit_tests.py", "file_name": "unit_tests.py", "file_ext": "py", "file_size_in_byte": 3346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "configparser.SafeConfigParser", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 19, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 19, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 45, "usage_type": "call"}, {"api_name": "os.system", "line_number": 49, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 64, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 69, "usage_type": "attribute"}, {"api_name": "colorlog.ColoredFormatter", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "559888840", "text": "import argparse\nfrom slugify import slugify\n\nparser = argparse.ArgumentParser(description='Process some names.')\nparser.add_argument('names', metavar='str', nargs='*')\nparser.add_argument('--slug', dest='slugify', action='store_const', const=str)\nparser.add_argument('--mayus',dest='mayusculas', action='store_const', const=str)\nparser.add_argument('--minus', dest='minusculas', action='store_const',const=str)\nparser.add_argument('--ascii', dest='codigo_ascii', action='store_const',const=str)\nparser.add_argument('--reverse', dest='invertir', action='store_const', const=str)\n\ndef transform(list):\n a = \"\"\n for i in list:\n a = a + i + \" \"\n return a\n\nif __name__ == '__main__':\n args = 
parser.parse_args()\n    list = args.names\n    text = transform(list)\n\n    print(\"El nombre proporcionado es *{}*:\".format(text.rstrip()))\n    if args.slugify:\n        print(\"- Su valor en slug es: {}\".format(slugify(text)))\n\n    if args.mayusculas:\n        print(\"- Su valor en mayúsculas es: {}\".format(text.upper()))\n\n    if args.minusculas:\n        print(\"- Su valor en minúsculas es: {}\".format(text.lower()))\n\n    if args.codigo_ascii:\n        sum = 0\n        for letter in text.rstrip():\n            sum+=ord(letter)\n        print(\"- Su valor en ascii es: {}\".format(sum))\n\n    if args.invertir:\n        print(\"- Su valor invertido es: {}\".format(text[::-1].lstrip()))\n", "sub_path": "Ene-Jun-2019/Angelica Rodriguez/Practica_2_seg_parcial/argparse_slugify.py", "file_name": "argparse_slugify.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "slugify.slugify", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "425316059", "text": "import requests\nimport os\nimport re\nimport base64\n\n\ndef get_gfw():\n    res = requests.get(\n        'https://cdn.jsdelivr.net/gh/Loyalsoldier/surge-rules@release/ruleset/gfw.txt')\n    if res.status_code != 200:\n        raise Exception('Connect error')\n    return res.text\n\n\nif __name__ == '__main__':\n    gfw = get_gfw()\n    gfw = gfw.split('\\n')\n    gfw_file = open(os.getcwd() + '/temp/gfw.txt', mode='w', encoding='utf-8')\n    for line in gfw:\n        if len(line) > 0:\n            gfw_file.write('%s,Proxy\\n' % line)\n    gfw_file.close()\n", "sub_path": "scripts/build-gfwlist.py", "file_name": "build-gfwlist.py", "file_ext": "py", "file_size_in_byte": 541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "46103898", "text": "#coding=utf8\nimport re, datetime, json, time, traceback\nimport tornado\nfrom tornado import (gen, httpserver, ioloop, web, httpclient)\n\nfrom tornado.options import define, options\ndefine(\"port\", default=19999)\n\nlocal_host = \"127.0.0.1\"\nasync_client = tornado.httpclient.AsyncHTTPClient(max_clients=100)\nconstruct_url = \"http://%s:19999/mss?\" % local_host\n# https://192.168.32.222:19999/mss?https://mp.weixin.qq.com/s?__biz=MzIxMzEzMjM5NQ==&mid=2651029560&idx=1&sn=437d90c61f84ea357c885dc8f94cea2b&chksm=8c4c553cbb3bdc2a0cdd2840e3d7ccfbecb62c83df3908f375bf587b0a0ed5504c73e55debe9&scene=38#wechat_redirect\n\nclass defaultHandler(tornado.web.RequestHandler):\n    @tornado.gen.coroutine\n    def get(self):\n        try:\n            self.request.headers[\"host\"] = re.match('http(s)?://([^/ \\\"]*)/.*', self.request.query).groups()[1]\n            self.request.headers[\"referer\"] = \"\"\n            request_options = httpclient.HTTPRequest(url=self.request.query, method=\"GET\", headers=self.request.headers)\n            result = yield async_client.fetch(request_options, raise_error=False)\n        except Exception as e:\n            self.finish(traceback.format_exc())\n            return  # result is undefined after a failed fetch, so stop here\n\n        self.set_header(\"Content-Type\", result.headers[\"Content-Type\"])\n        if \"text/html\" in result.headers[\"Content-Type\"]:\n            response = self.convert(result.body.decode(\"utf8\"))\n        else:\n            response = result.body\n        self.finish(response)\n\n\n    def convert_lazy_load(self, sub_result):\n        text = sub_result.group()\n        if \" data-src=\\\"\" in text:\n            return text.replace(\" src=\\\"\", \" nosrc=\\\"\").replace(\" 
data-src=\\\"\", \" src=\\\"\")\n else:\n return text\n\n def convert(self, data):\n data = re.sub(r\"(http(s)?://[^/ >]*/)\", lambda sub_result: construct_url + str(sub_result.group(1)), data)\n data = re.sub(r\"]*>\", lambda sub_result: self.convert_lazy_load(sub_result), data)\n\n return data\n\n\n@gen.coroutine\ndef init():\n try:\n options.parse_command_line()\n\n settings = {\n \"autoreload\": True\n }\n app = web.Application([\n (\"/mss\", defaultHandler)\n ], **settings)\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n\n print(\"init success:\", options.port)\n print(\"http://%s:19999/mss?xxxxxx\" % local_host)\n except:\n print(traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n init()\n ioloop.IOLoop.current().start()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "tornado.options.define", "line_number": 7, "usage_type": "call"}, {"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 10, "usage_type": "call"}, {"api_name": "tornado.httpclient", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 14, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 18, "usage_type": "call"}, {"api_name": "tornado.httpclient.HTTPRequest", "line_number": 20, "usage_type": "call"}, {"api_name": "tornado.httpclient", "line_number": 20, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 23, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 15, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "tornado.options.options.parse_command_line", "line_number": 50, "usage_type": "call"}, {"api_name": "tornado.options.options", "line_number": 50, "usage_type": "name"}, {"api_name": "tornado.web.Application", "line_number": 55, "usage_type": "call"}, {"api_name": "tornado.web", "line_number": 55, "usage_type": "name"}, {"api_name": "tornado.httpserver.HTTPServer", "line_number": 58, "usage_type": "call"}, {"api_name": "tornado.httpserver", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tornado.options.options.port", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 59, "usage_type": "name"}, {"api_name": "tornado.options.options.port", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 61, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 64, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 47, "usage_type": "name"}, {"api_name": "tornado.ioloop.IOLoop.current", "line_number": 69, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "565081151", "text": "__author__ = 'taejune'\n\nimport threading\nimport serial\nimport sys, os, time\nimport signal\n\n \ndef main(): \n print('Simple App start...')\n\n print('Try connect to device...')\n with serial.Serial('/dev/tty.TJSWEAR_V1-SPPDev', 115200, timeout=1) as ser:\n print('Serial connection success!!!')\n\n\n print('start sensing...')\n 
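# pyserial on Python 3 expects bytes; b's' is assumed here to be the firmware's start-sensing command\n        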
ser.write(b's')\n\n        while True:\n            getline = ser.readline()\n            print('[TJSWEAR_V1]: ' + getline.decode(errors='replace'))\n            time.sleep(0.05)\n\nif __name__ == '__main__':\n    main()", "sub_path": "device_interface/simple/simple.py", "file_name": "simple.py", "file_ext": "py", "file_size_in_byte": 540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "serial.Serial", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "444654263", "text": "\"\"\"\nThis script creates the training data splits for both the pose\nregression and wide baseline stereo matching tasks. \n\"\"\"\n\n\"\"\"\nImport the necessary libraries here\n\"\"\"\nimport numpy as np\nimport subprocess\nimport argparse\nimport pickle\nimport random\nimport math\nimport pdb\nimport sys\nimport os\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils import *\n\nrandom.seed(123)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset_dir', default='dataset/test/', type=str, \\\n        help='Root directory of dataset')\nparser.add_argument('--save_dir', default='dataset/test/', type=str, \\\n        help='Path to save the pose estimation and matching splits created')\nopts = parser.parse_args()\n\nsubprocess.call('mkdir %s'%(os.path.join(opts.save_dir, 'regTest')), shell=True)\n\n# Create pose regression and matching data\nnum_positive = int(subprocess.check_output('ls %s | wc -l'%(os.path.join(opts.save_dir, 'regTest', 'data')), shell=True)) // 2\n\nregpairs_negative = open(os.path.join(opts.save_dir, 'regTest', 'regpairs_negative.txt'), 'a')\n\n# This is used to decide the save path of images\ncount_matches = num_positive + 1\n\nfor base_dir in ['0096/']:\n    \n    targets = create_target_cache(opts.dataset_dir, base_dir) \n    targetIDs = list(targets.keys())  # list() so the IDs can be indexed below\n\n    print('\\nProcessing data subset: %s'%(base_dir))\n    count_targets = 0\n    for targetIDX, targetID in enumerate(targetIDs):\n        \n        count_targets += 1\n        targetCurr = targets[targetID]\n        nViews = len(targetCurr['views'])\n\n        for i in range(nViews):\n            view_i = targetCurr['views'][i]\n            \n            # Extract the name of the current patch\n            name_i = view_i['imagePath'].split('/')[1]\n            load_path_i = os.path.join(opts.dataset_dir, view_i['imagePath'])\n            save_path_i = os.path.join(opts.save_dir, 'regTest', 'data', name_i)\n            img_i_present = True\n            try:\n                img_open_i = Image.open(load_path_i)\n            except IOError:\n                img_i_present = False\n                print('Image %s not present!'%(os.path.join(opts.dataset_dir, view_i['imagePath'])))\n            \n            # If the SSI score of view_i >= 0.20\n            if float(view_i['alignData'][32]) >= 0.20 and img_i_present:\n                \n                img_curr = Image.open(load_path_i)\n                \n                # Randomly select 1 other target\n                jIDX = random.randint(0, len(targetIDs)-1)\n                # Ensure the same or a nearby target is not selected\n                if abs(jIDX - targetIDX) <= 200 :\n                    jIDX = (jIDX + 200)%len(targetIDs)\n                \n                jID = targetIDs[jIDX]\n                # Compute the distance between targets\n                dist_targets = haversine_distance(targets[targetID]['targetCoord'], \\\n                                                  targets[jID]['targetCoord'])\n\n                # Always select the 1st view of a target\n                view_j = targets[jID]['views'][0]\n                name_j = view_j['imagePath'].split('/')[1]\n                \n                load_path_j = os.path.join(opts.dataset_dir, view_j['imagePath'])\n                img_j_present = True\n                try:\n                    img_open_j = Image.open(load_path_j)\n                except IOError:\n                    img_j_present = False\n\n                if not img_j_present:\n                    continue\n                \n                save_name_i = '%.7d_i.jpg'%(count_matches)\n                save_name_j = '%.7d_p.jpg'%(count_matches)\n                count_matches += 1\n\n            
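# build the destination paths inside regTest/data for this negative pair\n            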
save_path_i = os.path.join(opts.save_dir, 'regTest', 'data', save_name_i)\n save_path_j = os.path.join(opts.save_dir, 'regTest', 'data', save_name_j)\n \n for data_pairs in zip([save_path_i, save_path_j], [view_i['alignData'], view_j['alignData']], [img_open_i, img_open_j]):\n \n save_path_curr = data_pairs[0]\n align_data = data_pairs[1]\n img_curr = data_pairs[2]\n # Crop a 192x192 image centered around the aligned center\n l1 = int(float(align_data[1]) - 96)\n t1 = int(float(align_data[2]) - 96)\n r1 = l1 + 192 - 1\n b1 = t1 + 192 - 1\n box1 = (l1, t1, r1, b1)\n img_curr = img_curr.crop(box1)\n # Resize image to 101x101 \n img_curr = img_curr.resize([101, 101])\n img_curr.save(save_path_curr)\n\n # Since these are not matches, set the relative pose, base_angle and translation\n # to a degenerate value\n rel_pose = [0, 0, 0]\n rel_trans = [0, 0, 0] \n b_angle = 0 \n\n # Write out the annotation\n regpairs_negative.write('%s %s %s %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f\\n'\\\n %(save_name_i, save_name_j, '0', rel_pose[0],\\\n rel_pose[1], rel_pose[2], rel_trans[0],\\\n rel_trans[1], rel_trans[2], b_angle, dist_targets))\n\n if count_targets % 50 == 0:\n sys.stdout.write('Finished target [%d/%d]\\n'%(count_targets, len(targets)))\n sys.stdout.flush()\n \n print('\\n')\n\nregpairs_negative.close()\n", "sub_path": "3d-generic/000_mtl_training/008_create_test_splits_negative.py", "file_name": "008_create_test_splits_negative.py", "file_ext": "py", "file_size_in_byte": 5454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.seed", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 71, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 135, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 135, "usage_type": "attribute"}]} +{"seq_id": "185120210", "text": "import random\nimport threading\nimport time\nimport logging\n\nlogging.basicConfig(format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)\n\nheladeras = []\nbotellasSobrantes = 0\nlatasSobrantes = 0\n\nsemaforo = threading.Semaphore(1)\n\ncantidadHeladeras = 3\ncantidadProveedores = 1\ncantidadBeodes = 1\n\nclass Heladera(threading.Thread):\n def __init__(self, id):\n super().__init__()\n self.botellas = []\n self.latas = []\n self.id = id\n\n def id(self):\n return self.id\n\n def hBotellas(self):\n return len(self.botellas)\n\n def hLatas(self):\n return len(self.latas)\n\n def hayEspacio(self):\n return (self.hBotellas() < 10) | (self.hLatas() < 15) \n\nclass Proveedores(threading.Thread):\n def __init__(self, monitorProveedor):\n super().__init__()\n self.monitorProveedor = monitorProveedor\n\n def cantidadBotellas(self):\n return random.randint(1, 10)\n\n def cantidadLatas(self):\n return random.randint(1, 10)\n\n def generarCervezas(self):\n global botellasAEntregar, latasAEntregar\n\n botellasAEntregar = self.cantidadBotellas()\n latasAEntregar = self.cantidadLatas()\n logging.info(f'Listo para entregar {botellasAEntregar} botellas y {latasAEntregar} latas')\n time.sleep(2)\n\n def entregarBotellas(self, heladera):\n global botellasAEntregar, botellasSobrantes\n\n botellasSobrantes = botellasSobrantes + botellasAEntregar\n\n logging.info(f'Botellas stock = {botellasSobrantes}')\n time.sleep(2)\n\n while (botellasSobrantes > 0) & (heladera.hBotellas() < 10):\n heladera.botellas.append(0)\n botellasSobrantes = botellasSobrantes - 1\n\n def entregarLatas(self, heladera):\n global latasAEntregar, latasSobrantes\n\n latasSobrantes = latasAEntregar + latasSobrantes\n\n logging.info(f'Latas stock = {latasSobrantes}')\n time.sleep(2)\n\n while (latasSobrantes > 0) & (heladera.hLatas() < 15):\n heladera.latas.append(0)\n latasSobrantes = latasSobrantes - 1\n\n def run(self):\n while(True):\n with self.monitorProveedor:\n for i in range(cantidadHeladeras):\n with monitorBeode:\n while (heladeras[i].hayEspacio()):\n self.generarCervezas()\n self.entregarBotellas(heladeras[i])\n self.entregarLatas(heladeras[i])\n\n logging.info(f'En la heladera {heladeras[i].id} hay {heladeras[i].hBotellas()} botellas y {heladeras[i].hLatas()} latas')\n time.sleep(2)\n logging.info(f'Sobraron {botellasSobrantes} botellas y {latasSobrantes} latas')\n time.sleep(2)\n monitorBeode.notify()\n logging.info(f'La heladera {heladeras[i].id} esta llena con {heladeras[i].hBotellas()} botellas y {heladeras[i].hLatas()} latas')\n time.sleep(2)\n \nclass Beodes(threading.Thread):\n def __init__(self, id, consumirBotellas, consumirLatas):\n super().__init__()\n self.id = id\n self.consumirBotellas = consumirBotellas\n self.consumirLatas = consumirLatas\n\n self.elegirHeladera = random.randint(0, cantidadHeladeras-1)\n\n logging.info(f'Soy el Beode {self.id}, consumiré de la heladera {self.elegirHeladera} un total de {self.consumirBotellas} botellas y {self.consumirLatas} latas')\n\n def 
beberBotella(self):\n logging.info(f'Bebiendo botella de cerveza')\n heladeras[self.elegirHeladera].botellas.pop(0)\n self.consumirBotellas = self.consumirBotellas - 1\n time.sleep(2)\n logging.info(f'Ya bebí, ahora en la heladera {heladeras[self.elegirHeladera]} quedan {heladeras[self.elegirHeladera].hBotellas()} botellas')\n\n def beberLata(self):\n logging.info(f'Bebiendo lata de cerveza')\n heladeras[self.elegirHeladera].latas.pop(0)\n self.consumirLatas = self.consumirLatas - 1\n time.sleep(2)\n logging.info(f'Ya bebí, ahora en la heladera {heladeras[self.elegirHeladera]} quedan {heladeras[self.elegirHeladera].hLatas()} latas')\n\n def run(self):\n while(True):\n while (self.consumirBotellas > 0) | (self.consumirLatas > 0):\n with monitorBeode:\n if (heladeras[self.elegirHeladera].hBotellas() == 0):\n monitorBeode.wait()\n self.beberBotella()\n time.sleep(5)\n\n if (heladeras[self.elegirHeladera].hLatas() == 0):\n monitorBeode.wait()\n self.beberLata()\n\nmonitorProveedor = threading.Condition()\nmonitorBeode = threading.Condition()\n\nfor i in range(cantidadHeladeras):\n heladeras.append(Heladera(i))\n\nfor i in range(cantidadProveedores):\n Proveedores(monitorProveedor).start()\n\nfor i in range(cantidadBeodes):\n tipoBeode = random.randint(1, 3)\n\n if (tipoBeode == 1):\n consumirBotellas = random.randint(1, 5)\n consumirLatas = 0\n Beodes(i, consumirBotellas, consumirLatas).start()\n elif (tipoBeode == 2):\n consumirBotellas = 0\n consumirLatas = random.randint(1, 5)\n Beodes(i, consumirBotellas, consumirLatas).start()\n elif (tipoBeode == 3):\n consumirBotellas = random.randint(1, 5)\n consumirLatas = random.randint(1, 5)\n Beodes(i, consumirBotellas, consumirLatas).start()", "sub_path": "bonus/bonus1.py", "file_name": "bonus1.py", "file_ext": "py", "file_size_in_byte": 5616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "threading.Semaphore", "line_number": 12, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 18, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 37, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 92, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 98, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 107, "usage_type": "call"}, {"api_name": 
"logging.info", "line_number": 110, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 117, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 121, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "threading.Condition", "line_number": 136, "usage_type": "call"}, {"api_name": "threading.Condition", "line_number": 137, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 146, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 149, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 154, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 157, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "324372197", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Perceptron\nfrom matplotlib.colors import ListedColormap\n\n# 初始化 w 和 b,np.array 相当��定义向量\nw, b = np.array([0, 0]), 0\n\n\n# 定义d(x)函数\ndef d(x):\n return np.dot(w, x) + b # np.dot是向量的点积\n\n\n# 历史信用卡发行数据\n# 这里的数据集不能随便修改,否则下面的暴力实现可能停不下来\nX = np.array([[5, 2], [3, 2], [2, 7], [1, 4], [6, 1], [4, 5]])\ny = np.array([-1, -1, 1, 1, -1, 1])\n\n# 感知机的暴力实现\nis_modified = True # 记录是否有分错的点\nwhile is_modified: # 循环,直到没有分错的点\n is_modified = False\n\n # 顺序遍及数据集 X\n for xi, yi in zip(X, y):\n # 如果有分错的\n if yi * d(xi) <= 0:\n # 更新法向量 w 和 b\n w, b = w + yi * xi, b + yi\n is_modified = True\n break\n\n\n# 下面是绘制的代码,主要展示暴力实现的结果,看不懂也没有关系\ndef make_meshgrid(x, y, h=.02):\n \"\"\"Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n \"\"\"\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy\n\n\ndef plot_contours(ax, clf, xx, yy, **params):\n \"\"\"Plot the decision boundaries for a classifier.\n\n Parameters\n ----------\n ax: matplotlib axes object\n clf: a classifier\n xx: meshgrid ndarray\n yy: meshgrid ndarray\n params: dictionary of params to pass to contourf, optional\n \"\"\"\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, **params)\n return out\n\n\n# 训练 skrlearn 中的感知机,这里是为了借用该感知机的接口,便于绘制决策区域\nclf = Perceptron().fit(X, y)\n# 根据上面暴力实现得到的 w 和 b 来修改感知机\nclf.coef_[0][0], clf.coef_[0][1], clf.intercept_[0] = w[0], w[1], b\n\n# 设置字体大小\nplt.rcParams.update({'font.size': 14})\n# 设置画布和坐标系\nfig, ax = plt.subplots(figsize=(6, 3), nrows=1, ncols=1)\nfig.subplots_adjust(left=0.25, right=0.75, top=0.999, bottom=0.001)\nax.set_xticks(()), ax.set_yticks(())\n\ncm = ListedColormap(('blue', 'red'))\nmarkers = ('x', 'o')\n\n# 决定绘制区域的大小\nX0, X1 = X[:, 0], X[:, 1]\nxx, yy = make_meshgrid(X0, X1)\nax.set_xlim(xx.min(), xx.max())\nax.set_ylim(yy.min(), yy.max())\n\n# 绘制决策区域\nplot_contours(ax, clf, xx, yy, cmap=cm, alpha=0.4)\n\n# 绘制决策直线\nlx = np.linspace(xx.min(), xx.max())\nly = - w[0] / w[1] * lx - b / w[1]\nax.plot(lx, ly, 'k-')\n\n# 根据类别不同,绘制不同形状的点\nvmin, vmax = min(y), max(y)\nfor cl, m in zip(np.unique(y), markers):\n ax.scatter(x=X0[y == cl], y=X1[y == cl], c=y[y 
== cl], alpha=1, vmin=vmin, vmax=vmax, cmap=cm, edgecolors='k',\n marker=m)\n\nplt.show()\n\n", "sub_path": "perceptron.py", "file_name": "perceptron.py", "file_ext": "py", "file_size_in_byte": 3140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.Perceptron", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 79, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "475608807", "text": "# -*- coding:utf-8 -*- \nimport grpc\nimport json\nimport logging\nimport service_pb2\nimport service_pb2_grpc\n\ndef setRecordResult(stub,SetPerson):\n result = stub.RecordDB(SetPerson)\n if not result.success:\n print(\"Kullanıcı Kaydedilemedi.\")\n return\n if result.success:\n print(\"Kullanıcı kaydedildi Key: %s Value: %s\" % (SetPerson.key, SetPerson.value))\n \ndef recordResult(stub,data,i):\n setRecordResult(stub, service_pb2.SetPerson(key = i, value = data))\n\ndef getGetRecordResult(stub,GetPerson):\n result = stub.GetRecordDB(GetPerson)\n if not result.success:\n print(\"Kullanıcı Bulunamadı.\")\n return\n if result.success:\n print(\"Kullanıcı Bulundu key: %s \" % (GetPerson.key))\n\ndef getRecordResult(stub,i):\n getGetRecordResult(stub, service_pb2.GetPerson(key = i))\n\ndef readJSON(jsonDosyasi):\n with open(jsonDosyasi) as f:\n data = json.load(f)\n return data\ndef run():\n with grpc.insecure_channel('localhost:4040') as channel:\n stub = service_pb2_grpc.GrpcServiceStub(channel)\n for i in range(1, 11):\n jsonDosyasi = str(i) + \".json\"\n data = readJSON(\"users/\" + jsonDosyasi)\n print(\"---------------Record Result---------------\") \n recordResult(stub,str(data),str(i))\n for i in range(1, 11):\n print(\"---------------Get Record Result---------------\")\n getRecordResult(stub,str(i))\n\nif __name__ == '__main__':\n logging.basicConfig()\n run()\n", "sub_path": "src/client/grpc.client.py", "file_name": "grpc.client.py", "file_ext": "py", "file_size_in_byte": 1522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "service_pb2.SetPerson", "line_number": 17, "usage_type": "call"}, {"api_name": "service_pb2.GetPerson", 
"line_number": 28, "usage_type": "call"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "grpc.insecure_channel", "line_number": 35, "usage_type": "call"}, {"api_name": "service_pb2_grpc.GrpcServiceStub", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "56527382", "text": "import pickle\nimport json\ndef fundate(i):\n try :\n n1=int(i.split('.')[2])\n n2=i.split('/')[1]\n \n Jornal=''\n if i.find('AccelBeams') != -1:\n Jornal='PRAB'\n elif i.find('Applied') != -1:\n Jornal='PRAPPLIED'\n elif i.find('Fluids') != -1:\n Jornal='PRFLUIDS'\n elif i.find('Series') != -1:\n Jornal='PRI'\n elif i.find('EducRes') != -1:\n Jornal='PRPER'\n elif i.find('STAB') != -1:\n Jornal='PRSTAB'\n elif i.find('STPER') != -1:\n Jornal='PRSTPER'\n elif i.find('RevX') != -1:\n Jornal='PRX'\n elif i.find('Mod') != -1:\n Jornal='RMP'\n elif i.find('RevA') != -1:\n Jornal='PRA'\n elif i.find('RevB') != -1:\n Jornal='PRB'\n elif i.find('RevC') != -1:\n Jornal='PRC'\n elif i.find('RevD') != -1:\n Jornal='PRD'\n elif i.find('RevE') != -1:\n Jornal='PRE'\n elif i.find('Lett') != -1: \n Jornal='PRL'\n else:\n Jornal='PR'\n date=''\n with open(\"aps-dataset-metadata-2016/\"+Jornal+\"/\"+str(n1)+\"/\"+n2+\".json\") as json_file:\n json_data1 = json.load(json_file)\n date=json_data1.get('date')\n except:\n date='N'\n return date\n \ndef fungender(i):\n gen=[]\n try:\n n1=int(i.split('.')[2])\n n2=i.split('/')[1] \n Jornal=''\n if i.find('AccelBeams') != -1:\n Jornal='PRAB'\n elif i.find('Applied') != -1:\n Jornal='PRAPPLIED'\n elif i.find('Fluids') != -1:\n Jornal='PRFLUIDS'\n elif i.find('Series') != -1:\n Jornal='PRI'\n elif i.find('EducRes') != -1:\n Jornal='PRPER'\n elif i.find('STAB') != -1:\n Jornal='PRSTAB'\n elif i.find('STPER') != -1:\n Jornal='PRSTPER'\n elif i.find('RevX') != -1:\n Jornal='PRX'\n elif i.find('Mod') != -1:\n Jornal='RMP'\n elif i.find('RevA') != -1:\n Jornal='PRA'\n elif i.find('RevB') != -1:\n Jornal='PRB'\n elif i.find('RevC') != -1:\n Jornal='PRC'\n elif i.find('RevD') != -1:\n Jornal='PRD'\n elif i.find('RevE') != -1:\n Jornal='PRE'\n elif i.find('Lett') != -1: \n Jornal='PRL'\n else:\n Jornal='PR'\n with open(\"aps-dataset-metadata-2016/\"+Jornal+\"/\"+str(n1)+\"/\"+n2+\".json\") as json_file:\n json_data1 = json.load(json_file)\n authorname=''\n \n if 'authors'in json_data1:\n s=json_data1.get('authors')\n for z in s:\n if 'firstname' in z:\n fi=z.get('firstname')\n if fi.isspace() or fi=='':\n authorname=authorname+'NON'\n else:\n authorname=authorname+fi\n else:\n authorname=authorname+'NON'\n if 'surname' in z:\n su=z.get('surname')\n authorname=authorname+'+'+su\n else:\n authorname=authorname+'+'+'NON'\n if authorname in finalfullnamedict.keys():\n gen.append(finalfullnamedict.get(authorname))\n return(gen,len(s))\n except:\n return (gen,0)\n \n\n \npkl_file = open('APSJournals/APSJournals-net/data/UniqueIndgrees.pkl', 'rb')\nUniqueIndgrees = pickle.load(pkl_file)\npkl_file.close()\n\npkl_file = open('name/data/genderdict.pkl', 'rb')\nfinalfullnamedict = pickle.load(pkl_file)\npkl_file.close()\n\nU=list(UniqueIndgrees)\nToFemale=[]\n\nfrom multiprocessing import Pool\n\ndef funtf(i):\n TF=[]\n pkl_file = open('APSJournals/APSJournals-net/data/qij'+str(i)+'.pkl', 'rb')\n qij = pickle.load(pkl_file)\n pkl_file.close()\n for j in qij:\n date0=fundate(j[0])\n date1=fundate(j[1])\n fe=0\n if date0!='N' and date1!='N':\n if date0 < date1: \n g,s=fungender(j[0])\n if s!=0:\n# 
if len(g)> (0.8*s): \n if len(g)==s: \n for k in g:\n if k=='male':\n fe=fe+1\n if fe/s > 0.5: \n TF.append((j[1],j[0],j[2]))\n else:\n g,s=fungender(j[1])\n if s!=0:\n if len(g)==s: \n for k in g:\n if k=='male':\n fe=fe+1\n if fe/s > 0.5: \n TF.append((j[0],j[1],j[2]))\n return TF\n \nwith Pool(8) as p:\n n=p.map(funtf,U) \nfor i in n:\n ToFemale=ToFemale+i\noutput = open('APSJournals/APSJournals-net/data/ToMaledominate.pkl', 'wb')\npickle.dump(ToFemale, output)\noutput.close()\n", "sub_path": "MissingCitations/DominantGender/ToMale.py", "file_name": "ToMale.py", "file_ext": "py", "file_size_in_byte": 5571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "json.load", "line_number": 43, "usage_type": "call"}, {"api_name": "json.load", "line_number": 88, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 116, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 120, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 131, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 159, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "602322936", "text": "import os\nimport sys\nimport platform\nimport glob\nimport json\nfrom datetime import datetime as dt\nfrom shutil import copy2 as cp\nimport mimetypes\n\n\ndef copy_file(file, path):\n try:\n cp(f'{path}{file}', os.path.join('notes', 'resources'))\n except FileNotFoundError:\n print(f'File \"{file}\" not found in {path}')\n return False\n else:\n return True\n\n# remove illegal chars for current OS\ndef clean_title(title) -> str:\n ostype = platform.system()\n if ostype == 'Linux':\n title = title.replace('/', '_')\n elif ostype == 'Darwin':\n title = title.replace(':', ' ')\n elif ostype == 'Windows':\n title = title.replace('\\\\', '_').replace('/', '_').replace('|', '_')\n title = title.replace('<', '-').replace('>', '-').replace(':', ' ')\n title = title.replace('?', '').replace('\"', '').replace('*', '')\n title = title.replace('\\n', '')\n return title\n\ndef read_annotations(list) -> str:\n annotations_list = '*Weblinks:*'\n for entry in list:\n if entry['source'] == 'WEBLINK':\n title = entry['title']\n url = entry['url']\n annotations_list += f' [{title}]({url});'\n return annotations_list\n\ndef read_attachments(list, path) -> str:\n attachments_list = '*Attachments:*\\n'\n for entry in list:\n if 'image' in entry['mimetype']:\n image = entry['filePath']\n if copy_file(image, path) is False:\n # If the file could not be found,\n # it will be checked if the file can be found\n # another file format.\n # Google used '.jpeg' instead of '.jpg'\n image_type = mimetypes.guess_type(f'{path}{image}')\n types = mimetypes.guess_all_extensions(image_type[0])\n for type in types:\n if type in image:\n image_name = image.replace(type, '')\n for t in types:\n if len(glob.glob(f'{path}{image_name}{t}')) > 0:\n image = f'{image_name}{t}'\n print(f'Found \"{image}\"')\n copy_file(image, path)\n respath = os.path.join('resources','')\n attachments_list += f'![{image}]({respath}{image})\\n'\n return attachments_list\n\ndef read_tasklist(list) -> str:\n content_list = '*Tasklist:*\\n'\n for entry in list:\n text = entry['text']\n if entry['isChecked'] is True:\n content_list += f'- [x] {text}\\n'\n else:\n content_list += f'- [ ] {text}\\n'\n return content_list\n\ndef read_tags(tags) -> str:\n tag_list = 'tags:'\n for entry in tags:\n tag = entry['name']\n 
tag_list += f' {tag};'\n return tag_list\n\ndef read_write_notes(path):\n jsonpath = os.path.join(path, '')\n notes = glob.glob(f'{jsonpath}*.json')\n for note in notes:\n with open(note, 'r', encoding='utf-8') as jsonfile:\n data = json.load(jsonfile)\n timestamp = data['userEditedTimestampUsec']\n if timestamp == 0:\n iso_datetime = dt.now().strftime('%Y%m%dT%H%M%S_edited')\n else:\n iso_datetime = dt.fromtimestamp(timestamp/1000000).strftime('%Y%m%dT%H%M%S')\n\n if data['title'] != '':\n title = str(data['title'])\n filename = clean_title(title)\n if len(filename) > 100:\n filename = filename[0:99]\n else:\n title = iso_datetime\n filename = title\n\n notespath = os.path.join('notes', '')\n if not os.path.exists(f'{notespath}{filename}.md'):\n print(f'Convert: {title}')\n with open(f'{notespath}{filename}.md', 'w', encoding='utf-8') as mdfile:\n mdfile.write(f'---\\n')\n mdfile.write(f'title: {title}\\n')\n if (title != iso_datetime):\n mdfile.write(f'date: {iso_datetime}\\n')\n # add tags\n try:\n tags = read_tags(data['labels'])\n mdfile.write(f'{tags}\\n')\n except KeyError:\n print('No tags available.')\n mdfile.write(f'---\\n\\n')\n # add text content\n try:\n textContent = data['textContent']\n mdfile.write(f'{textContent}\\n\\n')\n except KeyError:\n print('No text content available.')\n # add tasklist\n try:\n tasklist = read_tasklist(data['listContent'])\n mdfile.write(f'{tasklist}\\n\\n')\n except KeyError:\n print('No tasklist available.')\n # add annotations\n try:\n annotations = read_annotations(data['annotations'])\n mdfile.write(f'{annotations}')\n except KeyError:\n print('No annotations available.')\n # add attachments\n try:\n attachments = read_attachments(data['attachments'], path)\n mdfile.write(f'{attachments}')\n except KeyError:\n print('No attachments available.')\n else:\n print(f'File \"{title}\" exists!')\n\ndef create_folder():\n try:\n workpath = os.path.join('notes', 'resources')\n if not os.path.exists(workpath):\n os.makedirs(workpath)\n print('Create folder \"notes\" - home of markdown files.')\n except OSError:\n print('Creation of folders failed.')\n\nif __name__ == '__main__':\n create_folder()\n try:\n read_write_notes(sys.argv[1])\n except IndexError:\n print('Please enter a correct path!')\n", "sub_path": "keep-to-markdown.py", "file_name": "keep-to-markdown.py", "file_ext": "py", "file_size_in_byte": 6028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "shutil.copy2", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 22, "usage_type": "call"}, {"api_name": "mimetypes.guess_type", "line_number": 53, "usage_type": "call"}, {"api_name": "mimetypes.guess_all_extensions", "line_number": 54, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 86, "usage_type": "call"}, {"api_name": "json.load", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 92, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 92, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 151, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 159, "usage_type": "attribute"}]} +{"seq_id": "45012873", "text": "import re\n\n# from the iso8601 package, plus ^ and $ on the edges\nISO8601_REGEX = re.compile(r\"^([0-9]{4})(-([0-9]{1,2})(-([0-9]{1,2})\"\n r\"((.)([0-9]{2}):([0-9]{2})(:([0-9]{2})(\\.([0-9]+))?)?\"\n r\"(Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?$\")\n\nURL_REGEX = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https:// or ftp:// or ftps://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\nACCRUAL_PERIODICITY_VALUES = (None, \"Annual\", \"Bimonthly\", \"Semiweekly\", \"Daily\", \"Biweekly\", \"Semiannual\", \"Biennial\", \"Triennial\", \"Three times a week\", \"Three times a month\", \"Continuously updated\", \"Monthly\", \"Quarterly\", \"Semimonthly\", \"Three times a year\", \"Weekly\", \"Completely irregular\")\n\nLANGUAGE_REGEX = re.compile(\"^[A-Za-z]{2}([A-Za-z]{2})?$\")\n\n# load the OMB bureau codes on first load of this module\nimport urllib, csv\nomb_burueau_codes = set()\nfor row in csv.DictReader(urllib.urlopen(\"https://raw.github.com/seanherron/OMB-Agency-Bureau-and-Treasury-Codes/master/omb-agency-bureau-treasury-codes.csv\")):\n omb_burueau_codes.add(row[\"OMB Agency Code\"] + \":\" + row[\"OMB Bureau Code\"])\n\n# main function for validation\ndef do_validation(doc, errors_array):\n errs = { }\n \n if type(doc) != list:\n add_error(errs, 0, \"Bad JSON Structure\", \"The file must be an array at its top level. 
That means the file starts with an open bracket [ and ends with a close bracket ].\")\n elif len(doc) == 0:\n add_error(errs, 0, \"Catalog Is Empty\", \"There are no entries in your file.\")\n else:\n seen_identifiers = set()\n \n for i, item in enumerate(doc):\n # Required\n \n # title\n dataset_name = \"dataset %d\" % (i+1)\n if check_string_field(item, \"title\", 5, dataset_name, errs):\n dataset_name = '\"%s\"' % item.get(\"title\", \"\").strip()\n \n # description\n check_string_field(item, \"description\", 30, dataset_name, errs)\n \n # keyword\n if isinstance(item.get(\"keyword\"), (str, unicode)):\n add_error(errs, 5, \"Update Your File!\", \"The keyword field used to be a string but now it must be an array.\", dataset_name)\n \n elif check_required_field(item, \"keyword\", list, dataset_name, errs):\n for kw in item[\"keyword\"]:\n if not isinstance(kw, (str, unicode)):\n add_error(errs, 5, \"Invalid Required Field Value\", \"Each keyword in the keyword array must be a string\", dataset_name)\n elif len(kw.strip()) == 0:\n add_error(errs, 5, \"Invalid Required Field Value\", \"A keyword in the keyword array was an empty string.\", dataset_name)\n \n # bureauCode\n if check_required_field(item, \"bureauCode\", list, dataset_name, errs):\n for bc in item[\"bureauCode\"]:\n if not isinstance(bc, (str, unicode)):\n add_error(errs, 5, \"Invalid Required Field Value\", \"Each bureauCode must be a string\", dataset_name)\n elif \":\" not in bc:\n add_error(errs, 5, \"Invalid Required Field Value\", \"The bureau code \\\"%s\\\" is invalid. Start with the agency code, then a colon, then the bureau code.\" % bc, dataset_name)\n elif bc not in omb_burueau_codes:\n add_error(errs, 5, \"Invalid Required Field Value\", \"The bureau code \\\"%s\\\" was not found in our list.\" % bc, dataset_name)\n \n # modified\n check_date_field(item, \"modified\", dataset_name, errs)\n \n # publisher\n check_string_field(item, \"publisher\", 1, dataset_name, errs)\n \n # contactPoint\n check_string_field(item, \"contactPoint\", 3, dataset_name, errs)\n \n # mbox\n if check_string_field(item, \"mbox\", 3, dataset_name, errs):\n import lepl.apps.rfc3696\n email_validator = lepl.apps.rfc3696.Email()\n if not email_validator(item[\"mbox\"]):\n add_error(errs, 5, \"Invalid Required Field Value\", \"The email address \\\"%s\\\" is not a valid email address.\" % item[\"mbox\"], dataset_name)\n \n # identifier\n if check_string_field(item, \"identifier\", 1, dataset_name, errs):\n if item[\"identifier\"] in seen_identifiers:\n add_error(errs, 5, \"Invalid Required Field Value\", \"The dataset identifier \\\"%s\\\" is used more than once.\" % item[\"identifier\"], dataset_name)\n seen_identifiers.add(item[\"identifier\"])\n \n # programOffice\n if check_required_field(item, \"programOffice\", list, dataset_name, errs):\n for s in item[\"programOffice\"]:\n if not isinstance(s, (str, unicode)):\n add_error(errs, 5, \"Invalid Required Field Value\", \"Each value in the programOffice array must be a string\", dataset_name)\n elif len(s.strip()) == 0:\n add_error(errs, 5, \"Invalid Required Field Value\", \"A value in the programOffice array was an empty string.\", dataset_name)\n \n # accessLevel\n if check_string_field(item, \"accessLevel\", 0, dataset_name, errs):\n if item[\"accessLevel\"] not in (\"public\", \"restricted public\", \"non-public\"):\n add_error(errs, 5, \"Invalid Required Field Value\", \"The field 'accessLevel' had an invalid value: \\\"%s\\\"\" % item[\"accessLevel\"], dataset_name)\n elif 
item[\"accessLevel\"] == \"non-public\":\n add_error(errs, 1, \"Possible Private Data Leakage\", \"A dataset appears with accessLevel set to \\\"non-public\\\".\", dataset_name)\n \n # Required-If-Applicable\n \n # accessLevelComment\n if item.get(\"accessLevel\") != \"public\":\n check_string_field(item, \"accessLevelComment\", 10, dataset_name, errs)\n \n # accessURL & webService\n check_url_field(False, item, \"accessURL\", dataset_name, errs)\n check_url_field(False, item, \"webService\", dataset_name, errs)\n if item.get(\"accessLevel\") == \"public\" and item.get(\"accessURL\") is None:\n add_error(errs, 20, \"Where's the Dataset?\", \"A public dataset is missing an accessURL.\", dataset_name)\n elif item.get(\"accessURL\") is None and item.get(\"webService\") is None:\n add_error(errs, 20, \"Where's the Dataset?\", \"A dataset has neither an accessURL nor a webService.\", dataset_name)\n \n # format\n # TODO: MIME yes, but array?\n if item.get(\"accessURL\"):\n check_string_field(item, \"format\", 1, dataset_name, errs)\n \n # license\n if item.get(\"license\") is not None and not isinstance(item.get(\"license\"), (str, unicode)):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'license' must be a string value if specified.\", dataset_name)\n \n # spatial\n # TODO: There are more requirements than it be a string.\n if item.get(\"spatial\") is not None and not isinstance(item.get(\"spatial\"), (str, unicode)):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'spatial' must be a string value if specified.\", dataset_name)\n \n # temporal\n if item.get(\"temporal\") is None:\n pass # not required\n elif not isinstance(item[\"temporal\"], (str, unicode)):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'temporal' must be a string value if specified.\", dataset_name)\n elif \"/\" not in item[\"temporal\"]:\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'temporal' must be two dates separated by a forward slash.\", dataset_name)\n else:\n d1, d2 = item[\"temporal\"].split(\"/\", 1)\n if not ISO8601_REGEX.match(d1) or not ISO8601_REGEX.match(d2):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'temporal' has an invalid start or end date.\", dataset_name)\n \n # Expanded Fields\n \n # theme\n if item.get(\"theme\") is None:\n pass # not required\n elif not isinstance(item[\"theme\"], list):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'theme' must be an array.\", dataset_name)\n else:\n for s in item[\"theme\"]:\n if not isinstance(s, (str, unicode)):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"Each value in the theme array must be a string\", dataset_name)\n elif len(s.strip()) == 0:\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"A value in the theme array was an empty string.\", dataset_name)\n \n # dataDictionary\n check_url_field(False, item, \"dataDictionary\", dataset_name, errs)\n \n # dataQuality\n if item.get(\"dataQuality\") is None:\n pass # not required\n elif not isinstance(item[\"dataQuality\"], bool):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'theme' must be true or false, as a JSON boolean literal (not the string \\\"true\\\" or \\\"false\\\").\", dataset_name)\n \n # distribution\n if item.get(\"distribution\") is None:\n pass # not required\n elif not isinstance(item[\"distribution\"], list):\n add_error(errs, 50, \"Invalid 
Field Value (Optional Fields)\", \"The field 'distribution' must be an array, if present.\", dataset_name)\n else:\n for j, d in enumerate(item[\"distribution\"]):\n resource_name = dataset_name + (\" distribution %d\" % (j+1))\n check_url_field(True, d, \"accessURL\", resource_name, errs)\n check_string_field(d, \"format\", 1, resource_name, errs)\n # TODO: Check that it's a MIME type.\n \n # accrualPeriodicity\n if item.get(\"accrualPeriodicity\") not in ACCRUAL_PERIODICITY_VALUES:\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'accrualPeriodicity' had an invalid value.\", dataset_name)\n \n # landingPage\n check_url_field(False, item, \"landingPage\", dataset_name, errs)\n \n # language\n if item.get(\"language\") is None:\n pass # not required\n elif not isinstance(item[\"language\"], list):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'language' must be an array, if present.\", dataset_name)\n else:\n for s in item[\"language\"]:\n if not LANGUAGE_REGEX.match(s):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'language' had an invalid language: \\\"%s\\\"\" % s, dataset_name)\n \n # PrimaryITInvestmentUII\n if item.get(\"PrimaryITInvestmentUII\") is None:\n pass # not required\n elif not isinstance(item[\"PrimaryITInvestmentUII\"], (str, unicode)):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'PrimaryITInvestmentUII' must be a string, if present.\", dataset_name)\n \n # references\n if item.get(\"references\") is None:\n pass # not required\n elif not isinstance(item[\"references\"], list):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'references' must be an array, if present.\", dataset_name)\n else:\n for s in item[\"references\"]:\n if not URL_REGEX.match(s):\n add_error(errs, 50, \"Invalid Field Value (Optional Fields)\", \"The field 'references' had an invalid URL: \\\"%s\\\"\" % s, dataset_name)\n \n # issued\n if item.get(\"issued\") is not None:\n check_date_field(item, \"issued\", dataset_name, errs)\n \n # systemOfRecords\n # TODO: No details in the schema!\n \n # Form the output data.\n for err_type in sorted(errs):\n errors_array.append( (\n err_type[1], # heading\n [ err_item + (\" (%d locations)\" % len(errs[err_type][err_item]) if len(errs[err_type][err_item]) else \"\")\n for err_item in sorted(errs[err_type], key=lambda x:(-len(errs[err_type][x]), x))\n ]) )\n \ndef add_error(errs, severity, heading, description, context=None):\n s = errs.setdefault((severity, heading), { }).setdefault(description, set())\n if context: s.add(context)\n\ndef nice_type_name(data_type):\n if data_type == (str, unicode) or data_type in (str, unicode):\n return \"string\"\n elif data_type == list:\n return \"array\"\n else:\n return str(data_type)\n\ndef check_required_field(obj, field_name, data_type, dataset_name, errs):\n # checks that a field exists and has the right type\n if field_name not in obj:\n add_error(errs, 10, \"Missing Required Fields\", \"The '%s' field is missing.\" % field_name, dataset_name)\n return False\n elif obj[field_name] is None:\n add_error(errs, 10, \"Missing Required Fields\", \"The '%s' field is set to null.\" % field_name, dataset_name)\n return False\n elif not isinstance(obj[field_name], data_type):\n add_error(errs, 5, \"Invalid Required Field Value\", \"The '%s' field must be a %s but it has a different datatype (%s).\" % (field_name, nice_type_name(data_type), 
nice_type_name(type(obj[field_name]))), dataset_name)\n return False\n elif isinstance(obj[field_name], list) and len(obj[field_name]) == 0:\n add_error(errs, 10, \"Missing Required Fields\", \"The '%s' field is an empty array.\" % field_name, dataset_name)\n return False\n return True\n\ndef check_string_field(obj, field_name, min_length, dataset_name, errs):\n # checks that a required field exists, is typed as a string, and has a minimum length\n if not check_required_field(obj, field_name, (str, unicode), dataset_name, errs):\n return False\n elif len(obj[field_name].strip()) == 0:\n add_error(errs, 10, \"Missing Required Fields\", \"The '%s' field is present but empty.\" % field_name, dataset_name)\n return False\n elif len(obj[field_name].strip()) <= min_length:\n add_error(errs, 100, \"Are These Okay?\", \"The '%s' field is very short: \\\"%s\\\"\" % (field_name, obj[field_name]), dataset_name)\n return False\n return True\n \ndef check_date_field(obj, field_name, dataset_name, errs):\n # checks that a required date field exists and looks like a date\n if not check_required_field(obj, field_name, (str, unicode), dataset_name, errs):\n return False\n elif len(obj[field_name].strip()) == 0:\n add_error(errs, 10, \"Missing Required Fields\", \"The '%s' field is present but empty.\" % field_name, dataset_name)\n return False\n else:\n if not ISO8601_REGEX.match(obj[field_name]):\n add_error(errs, 5, \"Invalid Required Field Value\", \"The '%s' field has an invalid ISO 8601 date or date-time value: \\\"%s\\\".\" % (field_name, obj[field_name]), dataset_name)\n return False\n return True\n \ndef check_url_field(required, obj, field_name, dataset_name, errs):\n # checks that a required or optional field, if specified, looks like a URL\n if not required and (field_name not in obj or obj[field_name] is None): return True # not required, so OK\n if not check_required_field(obj, field_name, (str, unicode), dataset_name, errs): return False # just checking data type\n if not URL_REGEX.match(obj[field_name]):\n add_error(errs, 5, \"Invalid Required Field Value\", \"The '%s' field has an invalid URL: \\\"%s\\\".\" % (field_name, obj[field_name]), dataset_name)\n return False\n return True\n\n\n", "sub_path": "ckanext/datajson/datajsonvalidator.py", "file_name": "datajsonvalidator.py", "file_ext": "py", "file_size_in_byte": 16578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "re.compile", "line_number": 4, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "lepl.apps.rfc3696.apps.rfc3696.Email", "line_number": 80, "usage_type": "call"}, {"api_name": "lepl.apps.rfc3696.apps", "line_number": 80, "usage_type": "attribute"}, {"api_name": "lepl.apps.rfc3696", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "556617415", "text": "\"\"\"\nCopyright 2020, All rights reserved.\nAuthor : SangJae Kang\nMail : craftsangjae@gmail.com\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom .minhash import MinHash\ntqdm.pandas()\n\n\nclass MinHashSubGraph:\n \"\"\" A class that splits a Knowledge Graph into several subgraphs based on the MinHash algorithm\n\n >>> minsub = MinHashSubGraph()\n >>> 
minsub.group(graph)\n\n \"\"\"\n\n def __init__(self,\n num_clusters=20,\n maximum_size=100000,\n k_core=5,\n random_seed=1,\n verbose=1):\n self.num_clusters = num_clusters\n self.maximum_size = maximum_size\n self.k_core = k_core\n self.random_seed = random_seed\n self.verbose = verbose\n self.__dir__ = ['group']\n\n def group(self, graph: np.ndarray):\n \"\"\" Splits the graph into grouped subgraphs\n \"\"\"\n graph_df = self.graph2dataframe(graph)\n\n cluster_df = self.assign_cluster_ids(graph_df)\n\n subgraphs = self.group_by_subgraph(graph_df, cluster_df)\n\n subgraphs = self.trim_subgraph(subgraphs)\n\n return subgraphs\n\n def graph2dataframe(self, graph: np.ndarray) -> pd.DataFrame:\n \"\"\" Converts the graph into a dataframe\n \"\"\"\n if graph.ndim != 2 or graph.shape[1] < 2:\n raise ValueError(\"# ndim of graph should be 2 and graph.shape[1] should be bigger than 1\")\n\n graph_df = pd.DataFrame(graph)\n return graph_df.rename({0: 'head', 1: 'tail'}, axis=1)\n\n def assign_cluster_ids(self, graph_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\" Assigns num_sigs cluster ids per head of the graph\n \"\"\"\n cluster_series = self._apply_minhash(graph_df)\n\n cluster_df = self._cluster_series2dataframe(cluster_series)\n return cluster_df\n\n def _apply_minhash(self, graph_df: pd.DataFrame) -> pd.Series:\n generate_minhash = MinHash(self.num_clusters, self.random_seed)\n if self.verbose:\n return graph_df.groupby('head')['tail'].progress_apply(generate_minhash)\n else:\n return graph_df.groupby('head')['tail'].apply(generate_minhash)\n\n def _cluster_series2dataframe(self, cluster_series) -> pd.DataFrame:\n \"\"\" Converts the cluster series into a cluster dataframe\n \"\"\"\n clusters = [cluster_series.apply(lambda x: x[i]) for i in range(self.num_clusters)]\n clusters_df = pd.concat(clusters, axis=1)\n clusters_df.columns = [f'cluster{i}' for i in range(self.num_clusters)]\n return clusters_df\n\n def group_by_subgraph(self, graph_df, cluster_df):\n \"\"\" Groups subgraphs based on the cluster ids per head\n \"\"\"\n subgraphs = {}\n for sig_name, cluster_series in cluster_df.iteritems():\n heads = self._merge_heads(cluster_series)\n subgraphs[sig_name] = self._extract_subgraph(graph_df, heads)\n return subgraphs\n\n def _merge_heads(self, cluster_series):\n \"\"\" Takes as many head nodes as possible without exceeding maximum_size\n \"\"\"\n cluster_counts = cluster_series.value_counts().sort_values(ascending=False)\n cluster_ids = cluster_counts[cluster_counts.cumsum() < self.maximum_size].index\n if len(cluster_ids) == 0:\n cluster_ids = cluster_counts.index[:1]\n return cluster_series[cluster_series.isin(cluster_ids)].index\n\n def _extract_subgraph(self, graph_df, heads):\n \"\"\" Extracts the subgraph that contains the given heads\n \"\"\"\n return graph_df[graph_df['head'].isin(heads)].values\n\n def trim_subgraph(self, subgraphs):\n \"\"\" Trims the size of each subgraph using the k-core method\n \"\"\"\n if self.verbose:\n for name, subgraph in tqdm(subgraphs.items()):\n subgraphs[name] = self._kcore_sampling(subgraph)\n else:\n for name, subgraph in subgraphs.items():\n subgraphs[name] = self._kcore_sampling(subgraph)\n return subgraphs\n\n def _kcore_sampling(self, graph):\n \"\"\" Prunes nodes (head & tail) that have fewer than k connections\n \"\"\"\n if self.k_core <= 0:\n return graph\n df = self.graph2dataframe(graph)\n\n head_counts = df['head'].value_counts()\n tail_counts = df['tail'].value_counts()\n\n df = df[df['head'].isin(head_counts[head_counts >= self.k_core].index)\n & df['tail'].isin(tail_counts[tail_counts >= self.k_core].index)]\n\n return df.values\n\n", "sub_path": "graphembedding/subgraph/subgraph.py", 
"file_name": "subgraph.py", "file_ext": "py", "file_size_in_byte": 4570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "tqdm.tqdm.pandas", "line_number": 10, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "attribute"}, {"api_name": "minhash.MinHash", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "84657003", "text": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport functools\nfrom devtools_testutils import EnvironmentVariableLoader\n\nMapsRoutePreparer = functools.partial(\n EnvironmentVariableLoader, \"maps\",\n subscription_key=\"\",\n maps_client_id=\"fake_client_id\",\n maps_client_secret=\"fake_secret\",\n maps_tenant_id=\"fake_tenant_id\",\n)", "sub_path": "sdk/maps/azure-maps-route/tests/route_preparer.py", "file_name": "route_preparer.py", "file_ext": "py", "file_size_in_byte": 624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "functools.partial", "line_number": 9, "usage_type": "call"}, {"api_name": "devtools_testutils.EnvironmentVariableLoader", "line_number": 10, "usage_type": "argument"}]} +{"seq_id": "557348390", "text": "# coding=utf-8\r\nfrom appium import webdriver\r\nimport unittest\r\nfrom appium import webdriver\r\nimport time\r\nimport random\r\nfrom selenium.webdriver.support.wait import WebDriverWait #显示等待\r\n\r\n#定义手机信息\r\ndesired_caps = dict(\r\n platformName = \"Android\",\r\n platformVersion = \"9\",\r\n devicesName = \"8BNDU17803005724\",\r\n appActivity = \"com.eg.android.AlipayGphone.AlipayLogin\",\r\n appPackage = \"com.eg.android.AlipayGphone\",\r\n noReset = \"true\",\r\n fullReset = \"false\"\r\n)\r\n\r\n#进入扫一扫并进入相册\r\ndef intoSaoyiSao(driver):\r\n el8 = driver.find_element_by_id(\"com.alipay.android.phone.openplatform:id/saoyisao_iv\")\r\n el8.click()\r\n time.sleep(2)\r\n el2 = driver.find_element_by_id(\"com.alipay.mobile.scan:id/title_bar_album\")\r\n el2.click()\r\n time.sleep(2)\r\n#滑动照片\r\ndef scrollPhoto(driver):\r\n driver.swipe(start_x=560,start_y=980,end_x=560,end_y=460,duration=3000)\r\n time.sleep(3)\r\n\r\n#图片名称数组\r\nPhoto = [\"照片1,2020年10月22日 04点11分 \"]\r\n\"\"\" Photo = [\"照片1,2020年10月22日 04点11分 \",\"照片2,2020年10月21日 14点23分 \",\"照片3,2020年10月21日 14点23分 \",\"照片4,2020年10月21日 14点23分 \",\"照片5,2020年10月21日 14点23分 \",\\\r\n \"照片6,2020年10月21日 14点23分 \",\"照片7,2020年10月21日 14点23分 \",\"照片8,2020年10月21日 14点23分 \",\"照片9,2020年10月21日 14点23分 
\",\"照片10,2020年10月21日 14点23分 \",\\\r\n \"照片11,2020年10月21日 14点23分 \",\"照片12,2020年10月21日 14点23分 \",\"照片13,2020年10月21日 14点23分 \",\"照片14,2020年10月21日 14点23分 \",\"照片15,2020年10月21日 14点23分 \",\\\r\n \"照片16,2020年10月21日 14点23分 \",\"照片17,2020年10月21日 14点22分 \",\"照片18,2020年10月21日 14点22分 \",\"照片19,2020年10月21日 14点22分 \",\"照片20,2020年10月21日 14点22分 \",\\\r\n \"照片21,2020年10月21日 14点22分 \",\"照片22,2020年10月21日 14点22分 \",\"照片23,2020年10月21日 14点22分 \",\"照片24,2020年10月21日 14点22分 \",\"照片25,2020年10月21日 14点22分 \",\\\r\n \"照片26,2020年10月21日 14点22分 \",\"照片27,2020年10月21日 14点22分 \",\"照片28,2020年10月21日 14点22分 \",\"照片29,2020年10月21日 14点22分 \",\"照片30,2020年10月21日 14点22分 \"] \"\"\"\r\n# Photo = [\"照片44,2020年10月10日 03点00分 \",\"照片16,2020年10月10日 02点58分 \",\"照片17,2020年10月10日 02点58分 \"]\r\n\r\n\r\n\r\n#定位金额输入框并且输入随机金额(10块到15之间)\r\ndef intoMoney(driver):\r\n money = random.randint(10,15)\r\n el4 = driver.find_element_by_xpath(\"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/com.uc.webview.export.WebView/com.uc.webkit.ax/android.webkit.WebView/android.view.View/android.view.View[1]/android.view.View/android.view.View[2]/android.view.View[3]/android.widget.EditText\")\r\n el4.send_keys(money)\r\n return(money)\r\n\r\n\r\n\r\n#输入金额并输入密码\r\ndef inputPassword(driver):\r\n driver.tap([(179,1657)],10)\r\n driver.tap([(179,1657)],10)\r\n driver.tap([(179,1347)],10)\r\n driver.tap([(533,1823)],10)\r\n driver.tap([(179,1347)],10)\r\n driver.tap([(892,1509)],10)\r\n time.sleep(5)\r\n driver.tap([(979,149)],40)#完成\r\n time.sleep(2)\r\n driver.tap([(179,140)],40) #退出\r\n time.sleep(2)\r\n \r\n\r\n#分割列表\r\ndef Div_LIST(para=[]):\r\n for j in range(0,len(para),6):\r\n print(para[j:j+6])\r\n\r\n#主函数\r\ndef main():\r\n Money_list = []\r\n driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\r\n time.sleep(3)\r\n intoSaoyiSao(driver)\r\n\r\n for i in Photo:\r\n count = 1\r\n while count <= 6:\r\n time.sleep(1)\r\n try:\r\n print(\"开始扫码\")\r\n image = driver.find_element_by_accessibility_id(i)\r\n image.click()\r\n time.sleep(5)\r\n money = intoMoney(driver)\r\n Money_list.append(money)\r\n except Exception as e:\r\n while e!=\"True\":\r\n try:\r\n print(\"扫码失败,异常:\",e)\r\n print(\"向下滚动屏幕\")\r\n scrollPhoto(driver)\r\n image = driver.find_element_by_accessibility_id(i)\r\n except:\r\n print(\"继续向下滚动屏幕\")\r\n else:\r\n e = \"True\"\r\n else:\r\n \r\n time.sleep(5)\r\n el5 = driver.find_element_by_xpath(\"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/com.uc.webview.export.WebView/com.uc.webkit.ax/android.webkit.WebView/android.view.View/android.view.View[1]/android.view.View/android.view.View[3]/android.view.View[1]/android.view.View[1]\")\r\n el5.click()\r\n time.sleep(5)\r\n #el6 = driver.find_element_by_xpath(\"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/com.uc.webview.export.WebView/com.uc.webkit.ax/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View[4]/android.widget.Button\")\r\n #el6.click()\r\n #time.sleep(8) 
# switched to an explicit wait\r\n el6 = WebDriverWait(driver,timeout=8,poll_frequency=0.01).until(lambda x: x.find_element_by_xpath(\"/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/com.uc.webview.export.WebView/com.uc.webkit.ax/android.webkit.WebView/android.view.View/android.view.View/android.view.View/android.view.View[4]/android.widget.Button\"))\r\n el6.click()\r\n el7 = WebDriverWait(driver,timeout=8,poll_frequency=0.01).until(lambda x: x.find_element_by_id(\"com.alipay.android.phone.mobilecommon.verifyidentity:id/go_pwd\"))\r\n #el7 = driver.find_element_by_id(\"com.alipay.android.phone.mobilecommon.verifyidentity:id/go_pwd\")\r\n el7.click()\r\n time.sleep(3)\r\n inputPassword(driver)\r\n intoSaoyiSao(driver)\r\n print(\"Scan round \"+str(count)+\" finished\")\r\n count += 1\r\n finally:\r\n print(\"*\"*20)\r\n print(Money_list)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n#TODO: 1. Replace fixed sleeps with waits 2. Improve the prompt messages 3. Total the amounts and save them to a file ", "sub_path": "AlipayAndroid.py", "file_name": "AlipayAndroid.py", "file_ext": "py", "file_size_in_byte": 6718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "appium.webdriver.Remote", "line_number": 77, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 77, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 112, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 114, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "575561907", "text": "from readCSV import *\r\nfrom skip import *\r\nfrom sym import *\r\nfrom some import *\r\nfrom num import *\r\nfrom config import *\r\nfrom copy import deepcopy\r\nimport random\r\nimport math\r\nimport functools\r\n\r\n'''\r\nReturns whether the given string is a Klass (contains !)\r\n:param: s the string\r\n:return: boolean\r\n'''\r\ndef isKlass(s):\r\n return '!' in s\r\n\r\n'''\r\nReturns whether the given string is a Goal (contains +/-/Klass)\r\n:param: s the string\r\n:return: boolean\r\n'''\r\ndef isGoal(s):\r\n return '+' in s or '-' in s or isKlass(s)\r\n\r\n'''\r\nReturns whether the given string is a Number\r\n:param: s the string\r\n:return: boolean\r\n'''\r\ndef isNum(s):\r\n return s[0].isupper()\r\n\r\n'''\r\nReturns whether the given string is a Skip (contains ?)\r\n:param: s the string\r\n:return: boolean\r\n'''\r\ndef isSkip(s):\r\n return '?'
in s\r\n\r\n'''\r\nClass for a sample\r\n'''\r\nclass Sample:\r\n \r\n '''\r\n Initialize the sample\r\n rows = the stored data rows (list)\r\n keep = whether rows should be stored\r\n cols = the column objects (list)\r\n names = the names of the columns\r\n x = list of x vars\r\n y = list of y vars\r\n klass = klass value\r\n '''\r\n def __init__(self, inits = []):\r\n self.rows = []\r\n self.keep = True\r\n self.cols = []\r\n self.names = []\r\n self.x = []\r\n self.y = []\r\n self.klass = None\r\n \r\n #initialize the values\r\n for value in inits:\r\n self.add(value)\r\n \r\n '''\r\n Prints the mid point of the sample\r\n '''\r\n def __str__(self):\r\n return str(self.mid())\r\n \r\n '''\r\n Get the mid point of the sample in array form\r\n '''\r\n def mid(self):\r\n goals = []\r\n for col in self.y:\r\n goals.append(col.mid())\r\n return goals\r\n \r\n '''\r\n Clones a new sample\r\n '''\r\n def clone(self):\r\n s = Sample()\r\n s.keep = self.keep\r\n s.klass = self.klass\r\n s.add(self.names)\r\n return s\r\n \r\n '''\r\n Adds a new row\r\n :param lst: the row to add\r\n '''\r\n def add(self, lst):\r\n self.header(lst) if len(self.names) <= 0 else self.data(lst)\r\n \r\n '''\r\n Adds the header\r\n :param lst: the row to add\r\n '''\r\n def header(self, lst):\r\n #save the names for cloning\r\n self.names = lst\r\n \r\n at = 0\r\n for name in lst:\r\n new = None\r\n #find the right type\r\n if isSkip(name):\r\n new = Skip(at, name)\r\n elif isNum(name):\r\n new = Num(at, name)\r\n else:\r\n new = Sym(at, name)\r\n \r\n #add to the cols\r\n self.cols.append(new)\r\n \r\n if not isSkip(name):\r\n if isGoal(name):\r\n self.y.append(new)\r\n else:\r\n self.x.append(new)\r\n at+=1\r\n \r\n '''\r\n Adds the data\r\n :param lst: the row to add\r\n '''\r\n def data(self, lst):\r\n #add to each of the cols\r\n for c in self.cols:\r\n c.add(lst[c.at])\r\n \r\n if self.keep:\r\n self.rows.append(lst)\r\n \r\n '''\r\n Reads in from a csv file\r\n Uses readCSV from HW2\r\n :param f: the file to read\r\n '''\r\n def fromFile(self, f):\r\n for row in readCSV(f):\r\n self.add(row)\r\n \r\n '''\r\n Query to get the better row using Zitler's continuous domination indicator\r\n \"row1 is better than row2\"\r\n :param row1: the first row\r\n :param row2: the second row\r\n :return: the comparison\r\n '''\r\n def better(self, row1, row2):\r\n s1 = 0\r\n s2 = 0\r\n n = len(self.y)\r\n e = 2.71828\r\n \r\n for col in self.y:\r\n w = col.w\r\n x = col.norm(row1[col.at])\r\n if x==\"?\":\r\n #row1 is NOT better than row2\r\n return False\r\n y = col.norm(row2[col.at])\r\n if y==\"?\":\r\n #row1 IS better than row2\r\n return True\r\n s1 = s1 - math.pow(e, w * (x-y)/n)\r\n s2 = s2 - math.pow(e, w * (y-x)/n)\r\n #\"row1 is better than row2\"\r\n return s1/n < s2/n\r\n \r\n '''\r\n Compare function using better\r\n \"row1 is better than row2\"\r\n :param row1: the first row\r\n :param row2: the second row\r\n :return: the comparison\r\n '''\r\n def rowCompare(self, row1, row2):\r\n #if row1 is better than row2, then no switch is needed\r\n if self.better(row1, row2):\r\n return -1\r\n else:\r\n return 1\r\n \r\n '''\r\n Compare function using better\r\n \"s1 is better than s2\"\r\n :param s1: the first sample\r\n :param s2: the second sample\r\n :return: the comparison\r\n '''\r\n def sampleCompare(self, s1, s2):\r\n #if s1 is better than s2, then no switch is needed\r\n s1mid = []\r\n for col in s1.cols:\r\n s1mid.append(col.mid())\r\n s2mid = []\r\n for col in s2.cols:\r\n s2mid.append(col.mid())\r\n if self.better(s1mid, s2mid):\r\n return -1\r\n else:\r\n
return 1\r\n \r\n '''\r\n Distance function between two rows\r\n :param row1: the first row\r\n :param row2: the second row\r\n :return: the distance\r\n '''\r\n def dist(self, row1, row2):\r\n d = 0\r\n n = 1e-32\r\n for col in self.cols:\r\n n = n + 1\r\n a = row1[col.at]\r\n b = row2[col.at]\r\n if a == '?' and b == '?':\r\n d = d + 1\r\n else:\r\n d = d + pow(col.dist(a,b),Config.p)\r\n return pow( d/n , 1/Config.p )\r\n \r\n '''\r\n Neighbors function to get an array of tuples\r\n :param r1: the row to compare\r\n :param rows: the rows to compare to, or default all of the rows\r\n '''\r\n def neighbors(self, r1, rows = {}):\r\n if not rows:\r\n rows = self.rows\r\n a = []\r\n for r2 in rows:\r\n if self.dist(r1, r2) != 0.0:\r\n a.append((self.dist(r1, r2), r2))\r\n a = sorted(a, key=lambda row: row[0])\r\n return a\r\n \r\n '''\r\n Faraway function finds the point that is Config.far% away \r\n :param row: the row to find a point far away from\r\n :return: that far point\r\n '''\r\n def faraway(self, row):\r\n a = self.neighbors(row, self.rows if len(self.rows) < Config.samples else random.sample(self.rows, Config.samples))\r\n return a[math.floor(Config.far*len(a))][1]\r\n \r\n '''\r\n Does 1 random projection based on 2 distant points\r\n :param rows: the rows we are using\r\n '''\r\n def div1(self, rows):\r\n one = self.faraway(random.choice(rows))\r\n two = self.faraway(one)\r\n c = self.dist(one, two)\r\n tmp = []\r\n \r\n if Config.verbose:\r\n print(\"c={:.2} \".format(c))\r\n \r\n for row in rows:\r\n a = self.dist(row, one)\r\n b = self.dist(row, two)\r\n x = (pow(a,2) + pow(c,2) - pow(b,2)) / (2*c)\r\n tmp.append((x, row))\r\n tmp = sorted(tmp, key=lambda row: row[0])\r\n mid = math.floor(len(rows)/2)\r\n return [x[1] for x in tmp[:mid]], [x[1] for x in tmp[mid:]]\r\n \r\n '''\r\n Recursive helper method for div\r\n :param rows: the rows we are splitting\r\n :param level: the level we are on\r\n :param leafs: the leafs array\r\n :param enough: how we know when to stop\r\n '''\r\n def divR(self, rows, level, leafs, enough):\r\n #printing\r\n if Config.verbose:\r\n for i in range(level + 1):\r\n print(\"|.. 
\", end='')\r\n print(f\"n={len(rows)} \", end='')\r\n \r\n #if we have to stop when we are smaller than enough\r\n if len(rows) < enough:\r\n s = Sample([self.names] + rows)\r\n if Config.verbose:\r\n print(s)\r\n leafs.append(s)\r\n else:\r\n #divide and recurse\r\n left, right = self.div1(rows)\r\n self.divR(left, level+1, leafs, enough)\r\n self.divR(right, level+1, leafs, enough)\r\n \r\n '''\r\n Does many random projections (until size hits Config.enough)\r\n '''\r\n def divs(self):\r\n leafs = []\r\n enough = pow(len(self.rows), Config.enough)\r\n #perform the recurssive function\r\n self.divR(self.rows, 0, leafs, enough)\r\n #sort the leaves\r\n leafs.sort(key=functools.cmp_to_key(self.sampleCompare))\r\n return leafs\r\n \r\n '''\r\n Discretize values\r\n '''\r\n def discretize(self):\r\n arr = []\r\n clusters = self.divs()\r\n best, worst = clusters[0], clusters[-1]\r\n #zip them together and discretize the good from bad for each\r\n for good, bad in zip(best.x, worst.x):\r\n for d in good.discretize(good, bad):\r\n arr.append(d)\r\n return arr\r\n \r\n '''\r\n ys(fmt)\r\n Report the central tendcany of goals\r\n '''\r\n def ys(self, fmt):\r\n return \r\n \r\n '''\r\n Discretize with Binary Chops\r\n '''\r\n def binaryChops(self):\r\n arr = []\r\n clusters = self.divs()\r\n best, worst = clusters[0], clusters[-1]\r\n for i, x in enumerate(self.x):\r\n if isinstance(x, Num): \r\n #Count the best and worst in this new range\r\n bestCount = len([y for y in best.x[i]._all() if y <= x.mid()])\r\n worstCount = len([y for y in worst.x[i]._all() if y <= x.mid()])\r\n if (bestCount != 0 or worstCount!=0) and (bestCount != len(best.x[i]._all()) or worstCount != len(worst.x[i]._all())):\r\n arr.append(Discretization(at=x.at, name=x.name, lo=x.lo, hi=x.mid(), best= bestCount, rest=worstCount, first = True, last = False))\r\n \r\n bestCount = len([y for y in best.x[i]._all() if y > x.mid()])\r\n worstCount = len([y for y in worst.x[i]._all() if y > x.mid()])\r\n if (bestCount != 0 or worstCount!=0) and (bestCount != len(best.x[i]._all()) or worstCount != len(worst.x[i]._all())):\r\n arr.append(Discretization(at=x.at, name=x.name, lo=x.mid(), hi=x.hi, best= bestCount, rest=worstCount, first = False, last = True))\r\n else:\r\n for good, bad in zip(best.x, worst.x):\r\n for d in good.discretize(good, bad):\r\n arr.append(d)\r\n return arr;", "sub_path": "src/py/sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 9154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "math.pow", "line_number": 177, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 178, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 256, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 257, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 264, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 278, "usage_type": "call"}, {"api_name": "functools.cmp_to_key", "line_number": 316, "usage_type": "call"}]} +{"seq_id": "378343472", "text": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport model.resnet as models\nimport model.mobilenet as model_MV2\n\n\nclass _ASPPModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, dilation):\n super(_ASPPModule, self).__init__()\n self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n stride=1, padding=padding, dilation=dilation, bias=False)\n 
self.bn = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass ASPP(nn.Module):\n def __init__(self, backbone, output_stride):\n super(ASPP, self).__init__()\n if backbone == 'drn':\n inplanes = 512\n elif backbone == 'mobilenet':\n inplanes = 320\n else:\n inplanes = 2048\n if output_stride == 16:\n dilations = [1, 6, 12, 18]\n elif output_stride == 8:\n dilations = [1, 12, 24, 36]\n else:\n raise NotImplementedError\n\n self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0])\n self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1])\n self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2])\n self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3])\n\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU())\n self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)\n self.bn1 = nn.BatchNorm2d(256)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n self._init_weight()\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n x5 = self.global_avg_pool(x)\n x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n return self.dropout(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\n\nclass Deeplabv3(nn.Module):\n def __init__(self, layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, use_ppm=True,\n backbone = 'resnet', output_stride = 8, criterion=nn.CrossEntropyLoss(ignore_index=255), pretrained=True):\n super(Deeplabv3, self).__init__()\n assert layers in [18, 19, 34, 50, 101, 152]\n assert 2048 % len(bins) == 0\n assert classes > 1\n assert zoom_factor in [1, 2, 4, 8]\n self.zoom_factor = zoom_factor\n self.use_ppm = use_ppm\n self.criterion = criterion\n self.backbone=backbone\n\n if backbone == 'mobilenet':\n self.mobilenet = model_MV2.MobileNetV2(output_stride=output_stride, pretrained=pretrained)\n else:\n if layers == 18:\n resnet = models.resnet18(pretrained=pretrained, deep_base=False)\n elif layers == 34:\n resnet = models.resnet34(pretrained=pretrained, deep_base=False)\n elif layers == 50:\n resnet = models.resnet50(pretrained=pretrained)\n elif layers == 101:\n resnet = models.resnet101(pretrained=pretrained)\n else:\n resnet = models.resnet152(pretrained=pretrained)\n\n if layers >= 50:\n self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2,\n resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)\n else:\n self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)\n self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4\n\n if backbone == 'drn':\n inplanes = 512\n elif backbone == 'mobilenet':\n inplanes = 320\n else:\n inplanes = 2048\n\n 
self.aspp = ASPP(backbone, output_stride)\n if use_ppm:\n self.cls = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n nn.Conv2d(256, classes, kernel_size=1)\n )\n else:\n self.cls = nn.Sequential(\n nn.Conv2d(inplanes, 256, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n nn.Conv2d(256, classes, kernel_size=1)\n )\n\n if backbone == 'resnet':\n if output_stride == 8:\n for n, m in self.layer3.named_modules():\n if 'conv2' in n:\n m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n for n, m in self.layer4.named_modules():\n if 'conv2' in n:\n m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n elif output_stride == 16:\n for n, m in self.layer4.named_modules():\n if 'conv2' in n:\n m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n\n self.aux = nn.Sequential(\n nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n nn.Conv2d(256, classes, kernel_size=1)\n )\n\n if backbone == 'mobilenet':\n self.aux = nn.Sequential(\n nn.Conv2d(96, 96, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(96),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n nn.Conv2d(96, classes, kernel_size=1)\n )\n\n if backbone == 'drn':\n if output_stride == 8:\n for n, m in self.layer3.named_modules():\n if 'conv' in n:\n m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n for n, m in self.layer4.named_modules():\n if 'conv' in n:\n m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n elif output_stride == 16:\n for n, m in self.layer4.named_modules():\n if 'conv' in n:\n m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)\n elif 'downsample.0' in n:\n m.stride = (1, 1)\n\n self.aux = nn.Sequential(\n nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n nn.Conv2d(256, classes, kernel_size=1)\n )\n\n def forward(self, x, y=None, get_feat=False):\n x_size = x.size()\n if (y is not None) and get_feat:\n assert (x_size[2]) % 8 == 0 and (x_size[3]) % 8 == 0\n h = int((x_size[2]) / 8 * self.zoom_factor)\n w = int((x_size[3]) / 8 * self.zoom_factor)\n else:\n h = int(x_size[2])\n w = int(x_size[3])\n\n if self.backbone == 'mobilenet':\n x1 = self.mobilenet.features[0:4](x)\n x2 = self.mobilenet.features[4:7](x1)\n x3 = self.mobilenet.features[7:14](x2)\n x4 = self.mobilenet.features[14:](x3)\n else:\n x0 = self.layer0(x)\n x1 = self.layer1(x0)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n aux = self.aux(x3)\n\n if self.use_ppm:\n x_spp = self.aspp(x4)\n else:\n x_spp = x4\n pred = self.cls(x_spp)\n if self.zoom_factor != 1:\n pred = F.interpolate(pred, size=(h, w), mode='bilinear', align_corners=True)\n\n if get_feat:\n return [pred, aux, x_spp, x4, x3, x2, x1]\n\n if self.training:\n if self.zoom_factor != 1:\n aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True)\n main_loss = self.criterion(pred, y)\n aux_loss = self.criterion(aux, y)\n return pred.max(1)[1], main_loss, aux_loss\n else:\n return pred\n\n\nif __name__ == '__main__':\n import os\n\n 
os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n input = torch.rand(4, 3, 473, 473).cuda()\n model = Deeplabv3(layers=18, classes=19,\n zoom_factor=8, backbone='mobilenet',\n output_stride=16).cuda()\n model.eval()\n model.training = False\n # print(model)\n # output = model(input, keys=F.normalize(torch.rand((20, 256), dtype=torch.float), dim=1).cuda(), get_feat=True, is_train_mem=False)\n # print('Deeplabv3_mem', output.size())\n\n from ptflops import get_model_complexity_info\n\n with torch.cuda.device(0):\n flops, params = get_model_complexity_info(model, (3, 688, 512), as_strings=True, print_per_layer_stat=True)\n\n print('{:<30} {:<8}'.format('Computational complexity: ', flops))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))\n", "sub_path": "model/deeplabv3.py", "file_name": "deeplabv3.py", "file_ext": "py", "file_size_in_byte": 10667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 70, "usage_type": "name"}, 
{"api_name": "torch.cat", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "model.mobilenet.MobileNetV2", "line_number": 103, "usage_type": "call"}, {"api_name": "model.mobilenet", "line_number": 103, "usage_type": "name"}, {"api_name": "model.resnet.resnet18", "line_number": 106, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 106, "usage_type": "name"}, {"api_name": "model.resnet.resnet34", "line_number": 108, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 108, "usage_type": "name"}, {"api_name": "model.resnet.resnet50", "line_number": 110, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 110, "usage_type": "name"}, {"api_name": "model.resnet.resnet101", "line_number": 112, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 112, "usage_type": "name"}, {"api_name": "model.resnet.resnet152", "line_number": 114, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 144, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 181, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 206, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 207, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 240, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 247, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 258, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 259, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 260, "usage_type": "name"}, {"api_name": "model.resnet.eval", "line_number": 263, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 263, "usage_type": "name"}, 
{"api_name": "model.resnet.training", "line_number": 264, "usage_type": "attribute"}, {"api_name": "model.resnet", "line_number": 264, "usage_type": "name"}, {"api_name": "torch.cuda.device", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 271, "usage_type": "attribute"}, {"api_name": "ptflops.get_model_complexity_info", "line_number": 272, "usage_type": "call"}, {"api_name": "model.resnet", "line_number": 272, "usage_type": "argument"}]} +{"seq_id": "453649682", "text": "from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom user.views import show_login_views, do_login_views, show_register_views, do_register_views, do_answer_views\n\nurlpatterns = [\n # 用户请求登录页面\n url(r'^login/$', show_login_views),\n # 处理用户从登录页面请求的信息\n url(r'^dologin/$', do_login_views),\n # 用户请求注册页面\n url(r'^register/$', show_register_views),\n # 处理用户注册请求\n url(r'^doregister/$', do_register_views),\n # 用户登录之后进入功能选择\n # url(r'^dochoice/$', do_choice_views),\n # 进入答题系统\n url(r'^answer/$', do_answer_views),\n]", "sub_path": "user/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "user.views.show_login_views", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "user.views.do_login_views", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "user.views.show_register_views", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "user.views.do_register_views", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "user.views.do_answer_views", "line_number": 18, "usage_type": "argument"}]} +{"seq_id": "433483155", "text": "\"\"\"@package docstring\nParental Control Package is used to execute rover parental control process\n\"\"\"\nfrom framework.globalconfig.gc import *\nimport requests\nimport re\nimport json\nimport multiprocessing\nfrom libs.devices import RaspberryPi\n\nDEFAULT_ROVER_PATH = '/opt/symantec/rover'\nDEFAULT_HOST_IP = 'sitereview.bluecoat.com'\nDEFAULT_HOST_URL = 'https://%s/details?url='\nDEFAULT_PI_IP = '192.168.0.1'\n\nclass bcolors:\n OKGREEN = '\\033[92m'\n FAIL = '\\033[91m'\n WARNING = '\\033[93m'\n ENDC = '\\033[0m'\n\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef pool_worker(jobs):\n status = False\n p = multiprocessing.current_process()\n colorBlue(\"\\nStarting...%s (%s)\" % (p.name, p.pid))\n try:\n piIP = jobs[0]\n data = jobs[1]\n pi_handle = RaspberryPi.RaspberryPi(interface=piIP)\n\n start = time.time()\n\n response = pi_handle.SendCommand(\n cmd='python /root/ExecuteOperation.py --procname %s test_contect_block --url %s'\n % (p.pid, data.strip()), timeout=70, check_cmd=False)\n if not re.search(\"PASS\", response, re.M):\n raise ValueError(\"Contect Black check failed on URL %s\" % data)\n\n end = time.time()\n time_elapsed = end - start\n\n colorGreen(\"[%s] SUCCESS: Contect Black check on URL %s in %s seconds\" \\\n % (p.name, data.strip(), time_elapsed))\n\n status = True\n except Exception as e:\n colorRed(\"Error [%s]: Contect Black check on URL %s FAILED: %s\" % 
(p.name, data.strip(), e))\n    finally:\n        pi_handle.close()\n        colorBlue(\"Exiting...%s(%s)\\n\" % (p.name, p.pid))\n        sys.stdout.flush()\n    return status\n\nclass parentalcontrol():\n    \"\"\"\n    Class provides APIs to test WRS on Rover.\n    \"\"\"\n    def __init__(self, ratings_ip=DEFAULT_HOST_IP):\n        self.host_url = DEFAULT_HOST_URL % ratings_ip\n        self.count = 0\n        self.good_rating = ['g', 'u', 'w', 'r']\n        self.known_ratings = ['b', 'g', 'u', 'w', 'r']\n        self.rating_key = ['b', 'g', 'u', 'w', 'r']\n        self.head = {\n            'X-Symc-Rulespace': 'true',\n            'Accept': 'application/json'\n        }\n\n    def trigger_parallel_parentalcontrol_check(self, piIP, url_file):\n        status = True\n        try:\n            url_list = []\n            with open(url_file, 'r') as fp:\n                url_list = fp.readlines()\n\n            jobs = [(piIP, url) for url in url_list]\n            pool_size = multiprocessing.cpu_count() * 2 - 1\n            colorBlue(\"Pool Size: %s\" % str(pool_size))\n            chunk_size = int(len(jobs) * 0.2)\n            colorBlue(\"Chunk Size: %s\" % str(chunk_size))\n            pool = multiprocessing.Pool(processes=pool_size)\n\n            pool_outputs = pool.map(pool_worker, jobs, chunksize=chunk_size)\n\n            pool.close()\n            pool.join()\n\n            colorBlue(\"Return Status of Pool workers: %s\\n\" % pool_outputs)\n            if False in pool_outputs:\n                status = False\n        except Exception as e:\n            colorRed(\"Content Block check failed: %s\" % e)\n        finally:\n            return status\n\n    def __check_policy_parameter(self, policy, function_name):\n        \"\"\"\n        Private utility function to check 'policy' parameter on given function_name is not None\n        :param policy: the policy parameter\n        :param function_name: the name of the function with policy parameter\n        \"\"\"\n        if function_name is None:\n            raise ValueError(\"Invalid function_name value. Expected not None value.\")\n\n        if policy is None:\n            error_msg = (\"Invalid parameter provided to %s.\\n\"\n                         \"Expecting not None values. Actual param provided:\\n\\t\"\n                         \"policy [%s]\") % (function_name, policy)\n            raise ValueError(error_msg)\n\n    def get_policy(self, dut, userId):\n        \"\"\"\n        Retrieves the policy from the parental control json file on the gateway for the provided userId.\n        Refer to JSON_PARENTAL_CONTROL_POLICIES_FROM_CLOUD\n        :param dut: a reference to the gateway (Rover)\n        :param userId: the user id\n        :return: the policy from the parental control json file on the gateway for the provided userId.\n        \"\"\"\n        if (dut is None) or (userId is None):\n            error_msg = (\"Invalid parameter(s) provided to get_policy.\\n\"\n                         \"Expecting not None values. Actual param provided:\\n\\t\"\n                         \"dut [%s], userId [%s]\") % (dut, userId)\n            raise ValueError(error_msg)\n\n        pc_json = dut.read_file_from_dut_as_json(JSON_PARENTAL_CONTROL_POLICIES_FROM_CLOUD)\n        if \"policies\" in pc_json:\n            policies_list = pc_json[\"policies\"]\n            for policy in policies_list:\n                if \"id\" in policy:\n                    if policy[\"id\"] == userId:\n                        return policy\n                else:\n                    raise ValueError(\"Invalid policy item on gateway. Expecting id attribute\")\n            raise ValueError(\"There is no policy on gateway for user id %s\" % userId)\n        else:\n            raise ValueError(\"Invalid parental control policy file on gateway. Expecting policies attribute.\")\n\n    def get_devices_from_policy(self, policy):\n        \"\"\"\n        Retrieves the deviceIds entry from the provided policy.\n        :param policy:\n        :return: the deviceIds entry from the provided policy.\n        \"\"\"\n        self.__check_policy_parameter(policy, \"get_devices_from_policy\")\n\n        if \"deviceIds\" in policy:\n            return policy[\"deviceIds\"]\n        else:\n            raise ValueError(\"Invalid policy item on gateway. 
Expecting deviceIds attribute.\")\n\n def is_device_on_device_list(self, target_device, devices):\n \"\"\"\n True if target_device appears exactly one time on provided devices, False otherwise\n :param target_device: the device to look for in the provided devices\n :param devices: list of devices to search on.\n :return: True if target_device appears exactly one time on provided devices, False otherwise\n \"\"\"\n if (target_device is None) or (devices is None):\n error_msg = (\"Invalid parameter(s) provided to is_device_on_device_list.\\n\"\n \"Expecting not None values. Actual param provided:\\n\\t\"\n \"target_device [%s], devices [%s]\") % (target_device, devices)\n raise ValueError(error_msg)\n device_matches = filter(lambda device: device == target_device, devices)\n return len(device_matches) == 1\n\n def get_parental_control_from_policy(self, policy):\n \"\"\"\n Retrieves the parentalControl entry from the provided policy\n :param policy:\n :return: the parentalControl entry from the provided policy\n \"\"\"\n self.__check_policy_parameter(policy, \"get_parental_control_from_policy\")\n if \"policy\" in policy:\n policy_entry = policy[\"policy\"]\n if \"parentalControl\" in policy_entry:\n return policy_entry[\"parentalControl\"]\n else:\n raise ValueError(\"Invalid policy item on gateway. Expecting parentalControl attribute. Actual policy: %s\" % policy)\n else:\n raise ValueError(\"Invalid policy item on gateway. Expecting policy attribute. Actual policy: %s\" % policy)\n\n def get_time_entry_from_policy(self, policy):\n \"\"\"\n Retrieves the time entry under parentalControl entry from the provided policy\n :param policy:\n :return: the time entry under parentalControl entry from the provided policy\n \"\"\"\n self.__check_policy_parameter(policy, \"get_time_entry_from_policy\")\n parental_control_entry = self.get_parental_control_from_policy(policy)\n if \"time\" in parental_control_entry:\n return parental_control_entry[\"time\"]\n else:\n raise ValueError(\"Invalid policy item on gateway. expecting time attribute inside parentalControl.\")\n\n def get_bedtime_from_policy(self, policy):\n \"\"\"\n Retrieves the curfew entry under time entry under parentalControl entry from the provided policy\n :param policy:\n :return: the curfew entry under time entry under parentalControl entry from the provided policy\n \"\"\"\n self.__check_policy_parameter(policy, \"get_bedtime_from_policy\")\n time_entry = self.get_time_entry_from_policy(policy)\n\n if \"curfew\" in time_entry:\n return time_entry[\"curfew\"]\n else:\n raise ValueError(\"Invalid bedtime item on gateway. Expecting curfew attribute inside time.\")\n\n def get_dailytime_from_policy(self, policy):\n \"\"\"\n Retrieves the limits entry under time entry under parentalControl entry from the provided policy\n :param policy:\n :return: the limits entry under time entry under parentalControl entry from the provided policy\n \"\"\"\n self.__check_policy_parameter(policy, \"get_dailytime_from_policy\")\n time_entry = self.get_time_entry_from_policy(policy)\n if \"limits\" in time_entry:\n return time_entry[\"limits\"]\n else:\n raise ValueError(\"Invalid dailytime item on gateway. 
Expecting limits attribute.\")\n\n def get_content_from_policy(self, policy):\n \"\"\"\n Retrieves the content entry under parentalControl entry from the provided policy\n :param policy:\n :return:\n \"\"\"\n self.__check_policy_parameter(policy, \"get_content_from_policy\")\n parental_control_entry = self.get_parental_control_from_policy(policy)\n if \"content\" in parental_control_entry:\n return parental_control_entry[\"content\"]\n else:\n raise ValueError(\"Invalid content item on gateway. Expecting content attribute.\")\n\n def get_gateway_pauseInternet_by_user_id(self, dut, user_id):\n \"\"\"\n Retrieves the pauseInternet entry under parentalControl entry from the provided user_id's policy\n :param dut: reference to the gateway (Rover)\n :param user_id: the user id\n :return: the pauseInternet entry under parentalControl entry from the provided user_id's policy\n \"\"\"\n if (dut is None) or (user_id is None):\n error_msg = (\"Invalid parameter(s) provided to get_gateway_pauseInternet_by_user_id.\\n\"\n \"Expecting not None values. Actual param provided:\\n\\t\"\n \"dut [%s], user_id [%s]\") % (dut, user_id)\n raise ValueError(error_msg)\n\n policy = self.get_policy(dut, user_id)\n\n if not policy:\n raise ValueError(\"get_gateway_pauseInternet_by_user_id - Unable to retrieve policy for user id [%s]\" % user_id)\n\n parental_control_entry = self.get_parental_control_from_policy(policy)\n\n if not parental_control_entry:\n raise ValueError(\"get_gateway_pauseInternet_by_user_id - Unable to retrieve parental control entry for user id [%s]\" % user_id)\n\n if \"pauseInternet\" in parental_control_entry:\n return parental_control_entry[\"pauseInternet\"]\n else:\n raise ValueError(\"Invalid parental control entry on gateway. Expecting pauseInternet attribute.\")\n\n def verify_gateway_single_user_pauseInternet(self, dut, user_id, expected_pause_internet_value):\n \"\"\"\n Verifies if pauseInternet entry under parentalControl entry from the provided user_id's policy matches expected_pause_internet_value\n :param dut: a reference to the gateway (Rover)\n :param user_id: the user id\n :param expected_pause_internet_value: bool, the expected value for pauseInternet entry\n :return: True if pauseInternet entry under parentalControl entry from the provided user_id's policy matches expected_pause_internet_value,\n False otherwise\n \"\"\"\n try:\n if (dut is None) or (user_id is None) or (expected_pause_internet_value is None):\n error_msg = (\"Invalid parameter(s) provided to verify_getway_single_user_pauseInternet.\\n\"\n \"Expecting not None values. Actual param provided:\\n\\t\"\n \"dut [%s], user_id [%s], expected_pause_internet_value [%s]\"\n ) % (dut, user_id, expected_pause_internet_value)\n raise ValueError(error_msg)\n logging.info(\"Verifying pauseInternet for single user in gateway ...\")\n actual_pause_internet_value = self.get_gateway_pauseInternet_by_user_id(dut, user_id)\n logging.info(\"expected pauseInternet value is [%s] on gateway for user id [%s]\" % (expected_pause_internet_value, user_id))\n logging.info(\"actual pauseInternet value is [%s] on gateway for user id [%s]\" % (actual_pause_internet_value, user_id))\n return actual_pause_internet_value == expected_pause_internet_value\n\n except Exception as e:\n raise ValueError(\"Exception when verifying gateway single user pauseInternet. 
%s\" % e)\n\n def verify_gateway_policy_matches(self, gateway_user_policy, expectedPolicy):\n status = False\n try:\n actualBedTimePolicy = self.get_bedtime_from_policy(gateway_user_policy)['general'][0]\n if sorted(actualBedTimePolicy['days'].items()) == sorted(expectedPolicy['days'].items()):\n status = True\n colorGreen(\"Gateway Policy matches \")\n else:\n colorGreen((\"Gateway Policy mismatch Actual = %s, Expected = %s\" % (actualBedTimePolicy, expectedPolicy)))\n except Exception as e:\n raise ValueError(\"Exception when verifying Gateway Policy.\")\n return status\n\n def verify_bedtime_settings(self, pi, chosenHost):\n status = False\n try:\n\n pingstatus = pi.check_ping_on_raspeberrypi(chosenHost)\n if pingstatus:\n colorGreen(\"Bedtime settings did not worked.\")\n else:\n status = True\n colorGreen(\"Gateway Policy worked. Ping host failed with bedtime settings\")\n return status\n except Exception as e:\n raise ValueError(\"Exception when verifying BedTime Policy.\")\n return status\n\n def verify_gateway_limits_matches(self, gateway_user_policy, expectedPolicy):\n status = False\n try:\n actualLimitsPolicy = self.get_dailytime_from_policy(gateway_user_policy)['general'][0]\n if sorted(actualLimitsPolicy['days'].items()) == sorted(expectedPolicy['days'].items()):\n status = True\n colorGreen(\"Gateway Limits Policy matches\")\n else:\n colorRed((\"Gateway Limits Policy mismatch Actual = %s , Expected = %s\" % (actualLimitsPolicy,expectedPolicy)))\n except Exception as e:\n raise ValueError(\"Exception when verifying Gateway Limit Policy.\")\n return status\n\n def check_content_on_raspeberrypi(self, pi, chosenHost):\n '''\n :param pi: raspberry pi object\n :param chosenHost: content we want to block\n :return: true/false\n '''\n status = True\n try:\n # checking the blocked urls on the assgnied device [here the device is raspberry pi]\n pingstatus = pi.check_content_on_raspeberrypi(chosenHost)\n if pingstatus:\n colorGreen(\" settings did worked.\")\n else:\n status = False\n colorGreen(\"Content Policy worked. But Content blocking failed\")\n return status\n except Exception as e:\n raise ValueError(\"Exception when ping host.\")\n return status\n\n def verify_blockpage_message(self, pi, hostname):\n '''\n :param pi: raspberry pi object\n :param hostname: url that you want to reach\n :return: true/false\n '''\n status = False\n try:\n valid_ping = pi.check_ping_on_raspeberrypi(hostname)\n pingstatus = pi.check_web_status(hostname, \"302\")\n if valid_ping and pingstatus:\n colorGreen(\"Blocked page appeared when bedtime limits reached\")\n else:\n status = True\n colorGreen(\"Block page did not appear when bedtime limits were reached\")\n return status\n except Exception as e:\n raise ValueError(\"Exception when verifying BedTime Policy.\")\n return status\n\n def check_current_time_isvalid_bedtime(self):\n '''\n This function will check if the current time is valid for setting bedtime\n :return: true/false\n '''\n status = True\n try:\n now_time = datetime.datetime.now().time()\n if now_time >= datetime.time(11, 30) and now_time <= datetime.time(12, 30):\n colorRed(\"Bedtime settings will not work. 
Current time %s\" % now_time)\n status = False\n except Exception as e:\n raise ValueError(\"Exception when checking if current time is valid for bedtime settings.\")\n return status\n\n def verify_gateway_policy(self, gateway_user_policy, expectedPolicy):\n status = False\n try:\n actualLimitsPolicy = self.get_content_from_policy(gateway_user_policy)\n if (actualLimitsPolicy['sites']) == (expectedPolicy['policy']['parentalControl']['content']['sites']):\n status = True\n colorGreen(\"Gateway Limits Policy matches\")\n else:\n colorRed((\"Gateway Limits Policy mismatch Actual = %s , Expected = %s\" % (actualLimitsPolicy,expectedPolicy)))\n except Exception as e:\n raise ValueError(\"Exception when verifying Gateway Limit Policy.\")\n return status\n\n def verify_content_block_gateway_policy(self, gateway_user_policy=None, expectedPolicy=None):\n status = False\n try:\n if (gateway_user_policy is None) or (expectedPolicy is None):\n error_msg = (\"Invalid parameter(s) provided to verify_content_block_gateway_policy.\\n\"\n \"Expecting not None values. Actual param provided:\\n\\t\"\n \"gateway_user_policy [%s], expectedPolicy [%s]\") % (gateway_user_policy, expectedPolicy)\n raise ValueError(error_msg)\n actualLimitsPolicy = self.get_content_from_policy(gateway_user_policy)\n if not actualLimitsPolicy:\n raise ValueError(\"Actual Limits Policy from get_content_from_policy(gateway_user_policy) not available\")\n if (actualLimitsPolicy['categories']) == (expectedPolicy['policy']['parentalControl']['content']['categories']):\n status = True\n colorGreen(\"Gateway Blocked Category Policy matches\")\n else:\n colorRed(\n (\"Gateway Blocked Category Policy mismatch Actual = %s , Expected = %s\" % (actualLimitsPolicy, expectedPolicy)))\n except Exception as e:\n raise ValueError(\"Exception when verifying Gateway Gateway Blocked Category Policy.\")\n finally:\n return status", "sub_path": "libs/parentalcontrol/parentalcontrol.py", "file_name": "parentalcontrol.py", "file_ext": "py", "file_size_in_byte": 19195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "requests.packages.urllib3.disable_warnings", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.packages", "line_number": 23, "usage_type": "attribute"}, {"api_name": "multiprocessing.current_process", "line_number": 28, "usage_type": "call"}, {"api_name": "libs.devices.RaspberryPi.RaspberryPi", "line_number": 33, "usage_type": "call"}, {"api_name": "libs.devices.RaspberryPi", "line_number": 33, "usage_type": "name"}, {"api_name": "re.search", "line_number": 40, "usage_type": "call"}, {"api_name": "re.M", "line_number": 40, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 81, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "480167625", "text": "import math\nfrom functools import reduce\n\n\ndef lcm_base(x, y):\n return (x * y) // math.gcd(x, y)\n\n\ndef lcm(numbers):\n return reduce(lcm_base, numbers, 1)\n\n\ndef solve():\n N, M = [int(x) for x in input().split()]\n S = input()\n T = input()\n\n L = lcm([N, M])\n ic, jc = 0, 0\n i, j = ic * L // N + 1, jc * L // M + 1\n ans = L\n while ic < N and jc < M:\n # print(i, j, ic, jc, S[ic], S[jc])\n if i == j:\n if S[ic] != T[jc]:\n ans = '-1'\n break\n ic += 1\n jc += 1\n i = ic * L // N + 1\n j = jc * L // M + 1\n elif i < j:\n ic += 1\n i = ic * L // N + 1\n elif i > j:\n jc += 1\n j = jc * L // M + 1\n 
print(ans)\n\n\nif __name__ == '__main__':\n    solve()\n", "sub_path": "Python_codes/p03231/s451852797.py", "file_name": "s451852797.py", "file_ext": "py", "file_size_in_byte": 823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "math.gcd", "line_number": 6, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "392896708", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 4 15:53:30 2020\n\n@author: PRAJWAL\n\"\"\"\n\n\nfrom keras.preprocessing import image\nimport numpy as np  # needed for np.expand_dims below\n\n# training_set and classifier are assumed to be defined by the training steps earlier in this activity\nnew_image = image.load_img('../test_image_2.jpg',target_size=(64,64))\n\ntraining_set.class_indices\n\nnew_image = image.img_to_array(new_image)\nnew_image = np.expand_dims(new_image,axis = 0)\n\n\nresult = classifier.predict(new_image)\n\nif result[0][0] == 1:\n    prediction = 'It is a flower'\nelse:\n    prediction = 'It is a car'\n\nprint(prediction)\n", "sub_path": "Chapter 7/Activity 7.02/activity 7.02.py", "file_name": "activity 7.02.py", "file_ext": "py", "file_size_in_byte": 468, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "keras.preprocessing.image.load_img", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 13, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "180813141", "text": "import json\nimport subprocess\nimport time\n\nnow = time.time()\nstatus = json.load(open(\"/home/leo/Scripts/protonVPN/data.json\"))\n\nif now-status[\"last_update\"] > 300:\n    status[\"last_update\"] = now\n    status[\"last_update_readable\"] = f\"{time.asctime(time.localtime(now))}\"\n    status_raw = str(subprocess.check_output(['protonvpn', 'status'])).lstrip(\"b'\").rstrip(\"'\").split(r\"\\n\")\n    status_raw.pop()\n    status[\"request_counter\"] += 1\n    for s in status_raw:\n        tmp = s.split(\":\", 1)\n        tmp[0] = str(tmp[0]).replace(' ', '_')\n        if tmp[0] in [\"Sent\", \"Received\"]: # ie.: tmp[1] == 529.86 KB\n            subdata = tmp[1].strip(\" \").split(\" \")\n            #print(\"subdata\", subdata)\n            #print(\"pre round\", float(subdata[0]), type(float(subdata[0])))\n            #print(\"past round\", round(float(subdata[0]), 2))\n            tmp[1] = f\"{round(float(subdata[0]), 2)} {subdata[1]}\"\n            #print(\"tmp[1]\", tmp[1])\n        with open(f\"/home/leo/Scripts/protonVPN/{tmp[0]}.txt\", \"w\") as f:\n            f.write(tmp[1].strip(\" \"))\n    json.dump(status, open(\"/home/leo/Scripts/protonVPN/data.json\", \"w\"), indent=2)\nelse:\n    with open(\"/home/leo/Scripts/protonVPN/next_update_in.txt\", \"w\") as f:\n        f.write(f\"{300-int(now-status['last_update'])}\".zfill(2))", "sub_path": "Arch/Scripts/protonVPN/update.py", "file_name": "update.py", "file_ext": "py", "file_size_in_byte": 1285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "time.asctime", "line_number": 10, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "485105256", "text": "import argparse\nimport 
json\nimport os\n\nimport torch\nfrom torchvision import transforms\n\nfrom sources.general.data_loader import DatasetParams, get_loader, collate_fn_baseline\n# noinspection PyUnresolvedReferences\nfrom sources.general.vocabulary import Vocabulary, get_vocab\nfrom sources.models.baseline import Encoder, Decoder\nfrom sources.models.common import ModelParams\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass Baseline:\n def __init__(self, args):\n self.outputs = []\n print('model: {}, mode: {}, device: {}'.format(self.__class__.__name__,\n 'infer & save',\n device.type))\n self.transform = transforms.Compose([\n transforms.Resize(args.crop_size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n self.sequence_length = 5\n\n print('\\n============ LOADING DATA ============\\n')\n self._load_data(args)\n\n print('\\n============ LOADING MODEL ============\\n')\n self._reload_model(torch.load(args.model_path + args.model_name, map_location=lambda storage, loc: storage))\n\n def _load_data(self, args):\n dataset_configs = DatasetParams(args.dataset_config_file)\n dataset_params, vocab_path = dataset_configs.get_params(args.dataset)\n self.vocab = get_vocab(vocab_path)\n\n self.data_loader_test, _ = get_loader(dataset_configs=dataset_params, vocab=self.vocab,\n transform=self.transform, batch_size=1, shuffle=False,\n num_workers=args.num_workers, ext_feature_sets=args.features,\n skip_images=False, multi_decoder=False,\n _collate_fn=collate_fn_baseline)\n\n def _reload_model(self, state):\n self.params = ModelParams(state)\n self.encoder = Encoder(self.params).to(device)\n self.decoder = Decoder(self.params, len(self.vocab)).to(device)\n self.encoder.load_state_dict(state['encoder'])\n self.decoder.load_state_dict(state['decoder'])\n print('model loaded for inference')\n\n def infer_save(self, args):\n print('\\n============ INFERRING ============\\n')\n self.infer()\n\n print('\\n============ SAVING ============\\n')\n self.save_results(args)\n\n def infer(self):\n self.encoder.eval()\n self.decoder.eval()\n for idx, (sequences, _, _, story_ids, _) in enumerate(self.data_loader_test):\n sequence_data = Baseline.torchify_sequence(sequences)\n context_vector, sequence_features = self.encoder(sequence_data)\n sampled_ids = self.decoder.generate(context_vector.squeeze(0), sequence_features)\n sampled_ids = sampled_ids[0].cpu().numpy()\n\n sampled_caption = []\n for word_id in sampled_ids:\n word = self.vocab.idx2word[word_id]\n sampled_caption.append(word)\n if word == '':\n break\n story = ' '.join(sampled_caption)\n self.outputs.append({'story': story, 'story_id': story_ids})\n\n def save_results(self, args):\n filename = os.path.join(args.results_path, args.results_file)\n json.dump(self.outputs, open(filename, 'w'))\n\n if args.print_results:\n for d in self.outputs:\n print('{}: {}'.format(d['story_id'], d['story']))\n print(f'saved results to {filename}')\n\n @classmethod\n def torchify_sequence(cls, batch):\n batch_tensor = torch.Tensor([])\n for batch_idx in range(len(batch)):\n sequence_tensor = torch.Tensor([])\n image_sequence = batch[batch_idx]\n image_sequence.reverse()\n for image in image_sequence:\n image = image.unsqueeze(0)\n sequence_tensor = torch.cat([sequence_tensor, image])\n\n sequence_tensor = sequence_tensor.unsqueeze(0)\n batch_tensor = torch.cat([batch_tensor, sequence_tensor])\n\n return batch_tensor.to(device)\n\n\ndef main(args):\n _model = Baseline(args)\n 
_model.infer_save(args)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # prelim parameters\n parser.add_argument('--dataset', type=str, default='non_vist:test',\n help='dataset to use')\n parser.add_argument('--dataset_config_file', type=str,\n default='./resources/configs/datasets.local.conf',\n help='location of dataset configuration file')\n parser.add_argument('--model_path', type=str, default='./resources/models/',\n help='path for looking for models')\n parser.add_argument('--model_name', type=str, default='baseline-ep100.pth',\n help='model to load')\n parser.add_argument('--results_path', type=str, default='./resources/results/',\n help='path for storing results')\n parser.add_argument('--results_file', type=str, default='results_baseline_memad.json',\n help='results file name')\n parser.add_argument('--print_results', type=bool, default=True,\n help='option to print inference results')\n parser.add_argument('--crop_size', type=int, default=224,\n help='size for randomly cropping images')\n parser.add_argument('--tmp_dir_prefix', type=str, default='vist_tmp',\n help='where in /tmp folder to store project data')\n parser.add_argument('--log_step', type=int, default=1,\n help='step size for prining log info')\n\n # model parameters\n parser.add_argument('--extractor', type=str, default='resnet152',\n help='pretrained feature extractor CNN')\n parser.add_argument('--features', type=str, default=None,\n help='features to use as the initialload input for the '\n 'caption generator, given as comma separated list, '\n 'multiple features are concatenated, '\n 'features ending with .npy are assumed to be '\n 'precalculated features read from the named npy file, '\n 'example: \"resnet152,c_in14_gr_pool5_d_ca3.lmdb\"')\n parser.add_argument('--embed_size', type=int, default=250,\n help='dimension of text embedding vectors')\n parser.add_argument('--hidden_size', type=int, default=1000,\n help='dimension of RNN hidden states')\n parser.add_argument('--input_size', type=int, default=2048,\n help='dimension of the input feature vector')\n parser.add_argument('--num_layers', type=int, default=2,\n help='number of layers of RNN')\n parser.add_argument('--dropout', type=float, default=0.5,\n help='dropout for the RNN layers')\n parser.add_argument('--num_workers', type=int, default=0)\n parser.add_argument('--embed_type', type=str, default='default')\n parser.add_argument('--path_to_weights', type=str, default='./resources/GoogleNews-vectors-negative300.bin')\n\n main(parser.parse_args())\n", "sub_path": "sources/infer/baseline.py", "file_name": "baseline.py", "file_ext": "py", "file_size_in_byte": 7441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 25, "usage_type": "name"}, {"api_name": 
"torchvision.transforms.Normalize", "line_number": 26, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 34, "usage_type": "call"}, {"api_name": "sources.general.data_loader.DatasetParams", "line_number": 37, "usage_type": "call"}, {"api_name": "sources.general.vocabulary.get_vocab", "line_number": 39, "usage_type": "call"}, {"api_name": "sources.general.data_loader.get_loader", "line_number": 41, "usage_type": "call"}, {"api_name": "sources.general.data_loader.collate_fn_baseline", "line_number": 45, "usage_type": "name"}, {"api_name": "sources.models.common.ModelParams", "line_number": 48, "usage_type": "call"}, {"api_name": "sources.models.baseline.Encoder", "line_number": 49, "usage_type": "call"}, {"api_name": "sources.models.baseline.Decoder", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 101, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "308014900", "text": "from django.contrib import admin\nfrom mdeditor.widgets import MDEditorWidget\nfrom django.db import models\n\nfrom .models import BlogTag, Blog, UserProfile\n\n\n# admin.site.unregister(User)\n\n\n@admin.register(Blog)\nclass BlogAdmin(admin.ModelAdmin):\n formfield_overrides = {\n models.TextField: {'widget': MDEditorWidget}\n }\n list_display = [\n 'title',\n # 'content',\n 'short_content',\n 'tag',\n 'created_time',\n 'modify_time',\n 'is_show',\n ]\n readonly_fields = [\n 'created_time',\n 'modify_time',\n 'is_show',\n 'author',\n ]\n search_fields = [\n 'title',\n 'content',\n 'tag',\n ]\n list_filter = [\n 'tag',\n ]\n\n def save_model(self, request, obj, form, change):\n user = request.user\n obj.author = user\n obj.save()\n\n def has_delete_permission(self, request, obj=None):\n if obj is not None and obj.author == request.user:\n return True\n return False\n\n def short_content(self, obj):\n if obj.content.__len__() > 50:\n return obj.content[:50] + \"......\"\n return obj.content\n\n short_content.short_description = '正文内容'\n\n\n@admin.register(BlogTag)\nclass BlogTagAdmin(admin.ModelAdmin):\n list_display = [\n 'name',\n ]\n readonly_fields = [\n 'created_time',\n 'modify_time',\n 'is_show',\n ]\n list_filter = [\n 'name',\n ]\n search_fields = [\n 'name',\n ]\n\n def has_delete_permission(self, request, obj=None):\n if obj is not None and obj.author == request.user:\n return True\n return False\n\n\n@admin.register(UserProfile)\nclass UserProfileAdmin(admin.ModelAdmin):\n list_display = [\n 'username',\n 'gender',\n 'nickname',\n 'thumb_head_pic',\n ]\n readonly_fields = [\n\n 'thumb_head_pic',\n ]\n\n def save_model(self, request, obj, form, change):\n obj.save()\n head_url = obj.thumb_head\n obj.save()\n print(head_url)\n", "sub_path": "APPS/blog_app/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 2071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 
12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "mdeditor.widgets.MDEditorWidget", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Blog", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 59, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 58, "usage_type": "call"}, {"api_name": "models.BlogTag", "line_number": 58, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 58, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 82, "usage_type": "name"}, {"api_name": "django.contrib.admin.register", "line_number": 81, "usage_type": "call"}, {"api_name": "models.UserProfile", "line_number": 81, "usage_type": "argument"}, {"api_name": "django.contrib.admin", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "542104507", "text": "import ffmpeg\nimport os\nimport shutil\nfrom moviepy.editor import *\nimport discord\nimport asyncio\nimport re\n\n\nclass ytpmv:\n\n def __init__(self):\n self.msgQueue = []\n self.isBusy = False\n self.vidsToMerge = []\n self.codec = 'libvpx'\n\n\n async def sendHelp(self, message):\n\n embed = discord.Embed(\n colour=discord.Colour.teal(),\n title='ytpmvbot help - click for more',\n url='https://github.com/efpi-bot/ytpmvbot',\n )\n embed.add_field(\n name='Available commands:',\n value=\"\"\"• ytpmvbot pitch/duration [...]\n• ytpmvbot trim\n• ytpmvbot volume\n• ytpmvbot add\n• ytpmvbot reset\n• ytpmvbot merge\n• ytpmvbot concat\n\"\"\"\n )\n\n await message.channel.send(embed=embed)\n\n\n\n async def addToQueue(self, message):\n\n #REPLACE LINE BREAKS WITH SPACES\n message.content = message.content.replace('\\n', ' ')\n\n self.msgQueue.append(message)\n\n\n\n async def checkQueue(self):\n if self.msgQueue == [] or self.isBusy == True:\n return\n\n self.isBusy == True\n message = self.msgQueue[0]\n\n #CLEAR TEMP\n self.clearTemp()\n\n if message.content.lower() == 'ytpmvbot add':\n await self.add(message)\n\n elif message.content.lower() == 'ytpmvbot merge':\n await self.merge(message)\n\n elif message.content.lower() == 'ytpmvbot vmerge':\n await self.merge(message, vertical=True)\n\n elif message.content.lower() == 'ytpmvbot concat':\n await self.merge(message, concat=True)\n\n elif message.content.lower() == 'ytpmvbot reset':\n await self.reset(message)\n\n elif message.content.lower() == 'ytpmvbot help':\n await self.sendHelp(message)\n\n elif message.content.lower().startswith('ytpmvbot trim'):\n await self.trim(message)\n\n elif message.content.lower().startswith('ytpmvbot volume'):\n await self.volume(message)\n\n\n elif message.content.lower().startswith('ytpmvbot '):\n await self.run(message)\n\n\n self.msgQueue.pop(0)\n self.isBusy == False\n\n\n\n def clearTemp(self):\n if os.path.exists('temp'):\n shutil.rmtree('temp')\n os.mkdir('temp')\n\n\n async def run(self, message):\n\n #CHECK FOR ATTACHMENT\n if 
message.attachments == [] and message.reference == None:\n return\n\n #ADD REACTION\n await message.add_reaction(emoji='⌚')\n\n\n #PARSE MESSAGE\n try:\n notes, bpm = self.parseMessage(message.content)\n except:\n await message.reply('Parsing error')\n return\n\n\n #SAVE ORIGINAL SAMPLE\n try:\n filename = await self.saveAttachmentOrRef(message)\n except:\n await message.reply('Sample file error')\n return\n\n #RENDER VIDEO CLIPS\n try:\n await self.renderFlippedVid(filename)\n except:\n await message.reply('Video rendering error')\n return\n\n #RENDER PITCHED SAMPLES\n try:\n await self.renderPitchedSamples(notes)\n except:\n await message.reply('Audio rendering error')\n return\n\n #RENDER YTPMV\n try:\n await self.renderYTPMV(notes, bpm)\n except:\n await message.reply('Video rendering error')\n return\n\n #SEND FILE TO DISCORD\n try:\n await message.reply(file=discord.File(f'./temp/ytpmvbot.webm'))\n except:\n await message.reply('File too big')\n\n\n\n def parseMessage(self, content):\n\n content = self.addSpacesInbetweenBrackets(content)\n content = self.deleteDoubleSpaces(content)\n notes, bpm, pitchOffset = self.parseArgs(content)\n notes = self.parseNotes(notes, pitchOffset)\n\n return notes, bpm\n\n\n\n def parseNotes(self, notes, pitchOffset):\n notes = ' '.join(notes)\n\n depth = 0\n notes = notes.split(' ')\n maxdepth = 0\n deptharray = []\n for i in notes:\n if i=='[':\n depth+=1\n deptharray.append([i,depth])\n if ']' in i:\n depth-=1\n if depth > maxdepth:\n maxdepth = depth\n\n parsedArray = []\n while maxdepth > 0:\n parsedArray.clear()\n for i in range(len(deptharray)):\n if deptharray[i] == ['[', maxdepth]:\n startindex = i\n elif deptharray[i][0][0] == ']' and deptharray[i][1] == maxdepth:\n repeatCount = int(deptharray[i][0].split(']')[1])\n endindex = i\n loop = deptharray[startindex+1:endindex]\n for i in range(repeatCount-1):\n for k in loop:\n parsedArray.append(k)\n else:\n parsedArray.append(deptharray[i])\n\n deptharray = parsedArray.copy()\n\n maxdepth -= 1\n\n finalArray = []\n for i in deptharray:\n pitch = i[0].split('/')[0]\n\n if pitch != '':\n pitch = float(pitch) + pitchOffset\n\n duration = float(i[0].split('/')[1])\n finalArray.append([pitch, duration])\n return finalArray\n\n\n def addSpacesInbetweenBrackets(self, content):\n newcontent = []\n for i in range(len(content)):\n if content[i] == '[' and content[i+1] != ' ':\n newcontent.append(content[i])\n newcontent.append(' ')\n elif content[i] == ']' and content[i-1] != ' ':\n newcontent.append(' ')\n newcontent.append(content[i])\n else:\n newcontent.append(content[i])\n\n newcontent = ''.join(newcontent) \n return newcontent\n\n def deleteDoubleSpaces(self, content):\n notes = content.split(' ')\n newnotes = []\n for i in notes:\n if i != '':\n newnotes.append(i)\n notes = newnotes[1:]\n return notes\n\n\n def parseArgs(self, notes):\n bpm = 120\n if '-bpm' in notes:\n index = notes.index('-bpm') + 1\n bpm = float(notes[index])\n notes.pop(index-1)\n notes.pop(index-1)\n\n if not 30 < bpm < 600:\n raise Exception\n\n pitchOffset = 0\n if '-pitchoffset' in notes:\n index = notes.index('-pitchoffset') + 1\n pitchOffset = float(notes[index])\n notes.pop(index-1)\n notes.pop(index-1)\n\n if not -25 < pitchOffset < 25:\n raise Exception\n\n return notes, bpm, pitchOffset\n\n\n async def renderFlippedVid(self,filename):\n\n inVid = ffmpeg.input(f'./temp/{filename}')\n inFlipVid = inVid.video.hflip()\n\n outVid = ffmpeg.output(inVid, 'temp/samp1.webm')\n outVid.run()\n\n outFlip = ffmpeg.output(inFlipVid, 
'temp/samp-1.webm')\n outFlip.run()\n\n\n\n async def renderPitchedSamples(self, notes):\n\n uniqueNotes = []\n for i in notes:\n\n if i[0] == '':\n continue\n\n elif i[0] not in uniqueNotes:\n uniqueNotes.append(i[0])\n\n for i in uniqueNotes:\n rateFromPitch = 2**(i/12)\n if not 0.01 < rateFromPitch < 100:\n raise Exception\n\n pitchedSample = ffmpeg.input('temp/samp1.webm').audio.filter('rubberband', pitch=rateFromPitch)\n out = ffmpeg.output(pitchedSample, f'temp/samp{i}.ogg')\n out.run()\n\n\n\n async def renderYTPMV(self, notes, bpm):\n\n timelineV = []\n timelineA = []\n flipSwitch = 1\n timer = 0.0\n\n\n #MAKE FILE DICTS\n audioDict = {}\n for i in notes:\n pitch = i[0]\n if pitch in audioDict.keys():\n continue\n elif pitch != '':\n audioDict.update({pitch: AudioFileClip(f'temp/samp{pitch}.ogg')})\n else:\n continue\n\n videoDict = {'samp1': VideoFileClip(f\"temp/samp1.webm\"), 'samp-1': VideoFileClip(f\"temp/samp-1.webm\")}\n\n for i in notes:\n pitch = i[0]\n length = i[1]*60/bpm\n \n if pitch != '':\n audio = audioDict[pitch].copy()\n clip = videoDict[f'samp{flipSwitch}'].copy()\n\n clip.start = timer\n audio.start = timer\n\n if length < clip.duration and length < audio.duration:\n clip.end = timer+length\n audio.end = timer+length\n else:\n clip.end = timer+clip.duration\n audio.end = timer+audio.duration\n\n flipSwitch *= -1\n\n timelineV.append(clip)\n timelineA.append(audio)\n\n timer += length\n\n final_Vclip = CompositeVideoClip(timelineV)\n final_Aclip = CompositeAudioClip(timelineA)\n final_Vclip.audio = final_Aclip\n final_Vclip.resize(width=420).write_videofile(f\"./temp/ytpmvbot.webm\", codec=self.codec)\n\n\n #CLOSE CLIPS\n for i in audioDict.values():\n i.close()\n\n for i in videoDict.values():\n i.close()\n\n for i in timelineV:\n i.close()\n\n for i in timelineA:\n i.close()\n\n final_Vclip.close()\n final_Aclip.close()\n\n\n\n async def saveAttachmentOrRef(self, message, prefix='sample_'):\n\n #CHECK FOR ATTACHMENT\n if message.attachments == [] and message.reference == None:\n raise Exception\n\n #SAVE ORIGINAL SAMPLE\n if message.attachments != []:\n attachment = message.attachments[0]\n else:\n referencedMessage = await message.channel.fetch_message(message.reference.message_id)\n try:\n attachment = referencedMessage.attachments[0]\n except:\n raise Exception\n\n if not attachment.content_type.startswith('video'):\n raise Exception\n\n filename = prefix + attachment.filename\n print(filename)\n\n file = open(f'./temp/{filename}', 'wb')\n await attachment.save(file)\n\n file.close()\n return filename\n\n\n\n async def trim(self, message):\n\n msgSplit = message.content.split(' ')\n try:\n start = float(msgSplit[2])\n end = float(msgSplit[3])\n except:\n await message.reply('Start/end value error')\n return\n\n await message.add_reaction(emoji='⌚')\n\n #SAVE ORIGINAL SAMPLE\n try:\n filename = await self.saveAttachmentOrRef(message)\n except:\n await message.reply('Sample file error')\n return\n\n clip = VideoFileClip(f'./temp/{filename}')\n\n if end > clip.duration:\n end = clip.duration\n\n try:\n clip = clip.subclip(start, end)\n clip.resize(width=420).write_videofile('./temp/trimmed.webm', codec=self.codec)\n except:\n await message.reply('Video rendering error')\n else:\n #SEND FILE TO DISCORD\n try:\n await message.reply(file=discord.File(f'./temp/trimmed.webm'))\n except:\n await message.reply('File too big')\n finally:\n #CLOSE CLIPS\n clip.close()\n\n\n\n async def volume(self, message):\n\n msgSplit = message.content.split(' ')\n try:\n volRate = 
float(msgSplit[2])\n except:\n await message.reply('Volume multiplier value error')\n return\n\n await message.add_reaction(emoji='⌚')\n\n #SAVE ORIGINAL SAMPLE\n try:\n filename = await self.saveAttachmentOrRef(message)\n except:\n await message.reply('Sample file error')\n return\n\n\n clip = VideoFileClip(f'./temp/{filename}')\n\n try:\n clip = clip.volumex(volRate)\n clip.resize(width=420).write_videofile('./temp/volume.webm', codec=self.codec)\n except:\n await message.reply('Video rendering error')\n else:\n #SEND FILE TO DISCORD\n try:\n await message.reply(file=discord.File(f'./temp/volume.webm'))\n except:\n await message.reply('File too big')\n finally:\n #CLOSE CLIPS\n clip.close()\n\n\n async def add(self, message):\n\n if len(self.vidsToMerge) == 4:\n await message.reply('Can add max 4 videos. Send \\'ytpmvbot reset\\' to start over.')\n return\n\n self.vidsToMerge.append(message)\n await message.add_reaction(emoji='🎬')\n\n\n async def reset(self, message):\n self.vidsToMerge = []\n await message.add_reaction(emoji=\"👌\")\n\n\n\n async def merge(self, message, concat=False, vertical=False):\n\n if len(self.vidsToMerge) < 2:\n await message.reply('Add some videos first!')\n return\n\n await message.add_reaction(emoji='⌚')\n\n counter = 0\n filenames = []\n for i in self.vidsToMerge:\n #SAVE ORIGINAL SAMPLE\n try:\n filename = await self.saveAttachmentOrRef(i, prefix=f'merge_{counter}')\n except:\n await message.reply('Video file error. Send \\'ytpmvbot reset\\' to start over.')\n return\n\n filenames.append(filename)\n counter += 1\n\n tracks = []\n counter = 0\n for i in filenames:\n clip = VideoFileClip(f\"./temp/{i}\")\n tracks.append(clip)\n counter += 1\n\n if concat == True:\n final_clip = concatenate_videoclips(tracks, method='compose')\n\n elif len(filenames) == 2:\n\n if vertical == True:\n final_clip = clips_array([ [ tracks[0] ], [ tracks[1] ] ])\n else:\n final_clip = clips_array([tracks])\n\n elif len(filenames) == 3:\n final_clip = clips_array([tracks[:2]])\n final_clip = clips_array([ [ final_clip ], [ tracks[2] ] ])\n\n elif len(filenames) == 4:\n top = tracks[:2]\n bottom = tracks[2:]\n final_clip = clips_array([top, bottom])\n\n try:\n final_clip.resize(width=420).write_videofile(f'./temp/ytpmvmbot.webm', codec=self.codec)\n except:\n await message.reply('Video rendering error')\n else:\n #SEND FILE TO DISCORD\n try:\n await message.reply(file=discord.File(f'./temp/ytpmvmbot.webm'))\n except:\n await message.reply('File too big')\n finally:\n #CLOSE CLIPS\n for i in tracks:\n i.close()\n\n final_clip.close()\n await self.reset(message)\n\n\n#DISCORD BOT HERE\n\nKEY = open('./key').read()\nclient = discord.Client()\nytpmv = ytpmv()\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n #BACKGROUND QUEUE CHECK\n while not client.is_closed():\n await ytpmv.checkQueue()\n await asyncio.sleep(1)\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n\n if message.content.lower().startswith('ytpmvbot'):\n await ytpmv.addToQueue(message)\n\n\n\nclient.run(KEY)\n", "sub_path": "ytpmv.py", "file_name": "ytpmv.py", "file_ext": "py", "file_size_in_byte": 15486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "discord.Embed", "line_number": 21, "usage_type": "call"}, {"api_name": "discord.Colour.teal", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 22, "usage_type": 
"attribute"}, {"api_name": "os.path.exists", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 97, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 98, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 149, "usage_type": "call"}, {"api_name": "ffmpeg.input", "line_number": 265, "usage_type": "call"}, {"api_name": "ffmpeg.output", "line_number": 268, "usage_type": "call"}, {"api_name": "ffmpeg.output", "line_number": 271, "usage_type": "call"}, {"api_name": "ffmpeg.input", "line_number": 292, "usage_type": "call"}, {"api_name": "ffmpeg.output", "line_number": 293, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 430, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 468, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 546, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 561, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 571, "usage_type": "call"}]} +{"seq_id": "468351630", "text": "from redis_db import pool\r\nimport redis\r\n\r\ncon = redis.Redis(\r\n connection_pool=pool\r\n)\r\ntry:\r\n #操作哈希\r\n con.hmset(\"9527\",{\"name\":\"scott\",\"sex\":\"male\",\"age\":33})\r\n con.hset(\"9527\",\"city\",\"Beijing\")\r\n con.hdel(\"9527\",\"age\")\r\n result = con.hexists(\"9527\",\"name\")\r\n print(result)\r\n result = con.hgetall(\"9527\")\r\n for one in result:\r\n print(one.decode(\"utf-8\"),result[one].decode(\"utf-8\"))\r\nexcept Exception as e:\r\n print(e)\r\nfinally:\r\n del con", "sub_path": "demo_2/example_5.py", "file_name": "example_5.py", "file_ext": "py", "file_size_in_byte": 487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "redis.Redis", "line_number": 4, "usage_type": "call"}, {"api_name": "redis_db.pool", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "75876078", "text": "import os\nfrom functools import partial\nfrom pluginbase import PluginBase\n\n\n# For easier usage calculate the path relative to here.\nhere = os.path.abspath(os.path.dirname(__file__))\nget_path = partial(os.path.join, here)\n\n\n# Setup a plugin base for \"example.modules\" and make sure to load\n# all the default built-in plugins from the builtin_plugins folder.\nplugin_base = PluginBase(package='plugins',\n searchpath=[get_path('./plugins')])\n\n\nclass Application(object):\n \"\"\"Represents a simple example application.\"\"\"\n\n def __init__(self):\n # And a dictionary where it stores \"formatters\". These will be\n # functions provided by plugins which format strings.\n self.plugins = {}\n\n # and a source which loads the plugins from the \"app_name/plugins\"\n # folder. We also pass the application name as identifier. 
This\n        # is optional but by doing this our plugins have consistent\n        # internal module names which allows pickle to work.\n        self.source = plugin_base.make_plugin_source(\n            searchpath=[get_path('./plugins')])\n\n        # Here we list all the plugins the source knows about, load them\n        # and then use the \"setup\" function provided by the plugin to\n        # initialize the plugin.\n        print('Loading Plugins:')\n        for plugin_name in self.source.list_plugins():\n            plugin = self.source.load_plugin(plugin_name)\n            plugin.setup(self)\n        print('Finished Loading Plugins:')\n\n    def register_plugin(self, name, plugin):\n        \"\"\"A function a plugin can use to register a formatter.\"\"\"\n        self.plugins[name] = plugin\n        print(\"Loaded Plugin '%s': %s\" % (name, plugin))\n\ndef main():\n    Application()\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "src/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pluginbase.PluginBase", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "495603863", "text": "# -*- coding: utf-8 -*-\r\n# author:Super.Shen\r\nfrom datetime import *\r\nimport pandas as pd\r\npd.set_option('expand_frame_repr', False)  # 当列太多时不换行\r\npd.set_option('display.max_rows', 1500)\r\n\r\ntime1 = datetime.today()\r\nnian = str(time1.year)\r\nyue = str(time1.month)\r\nri = str(time1.day-1)\r\nri_zhou = str(time1.day-5)\r\nri_now = str(time1.day)\r\n\r\nif len(yue) < 2:\r\n    yue = '0' + yue\r\nif len(ri_now) < 2:\r\n    ri_now = '0' + ri_now\r\nif len(ri) < 2:\r\n    ri = '0' + ri\r\nif len(ri_zhou) < 2:\r\n    ri_zhou = '0' + ri_zhou\r\n\r\n\r\n# ri_now = str(time1.day)  # duplicate assignment commented out: it overwrote the zero-padded value set above\r\ndef fengkong_run(df):\r\n    df_check = pd.DataFrame()\r\n\r\n    df_run = pd.DataFrame()\r\n    for x in range(0, df.shape[0]):\r\n        # print(df.shape[0])\r\n        # exit()\r\n        df_run.loc[x, '审核员姓名'] = df.loc[x, '审核员姓名']\r\n        df_run.loc[x, '电话量'] = df.loc[x, '电话量']\r\n        df_run.loc[x, '昨日到期客户数'] = df.loc[x, '昨日到期客户数']\r\n        df_run.loc[x, '昨日逾期客户数'] = df.loc[x, '昨日逾期客户数']\r\n        df_run.loc[x, '续期客户量'] = df.loc[x, '续期客户量']\r\n        df_run.loc[x, '持单总量'] = df.loc[x, '持单总量']\r\n\r\n        df_run.loc[x, '自己电话+微信成功'] = df.loc[x, '电话+微信量(成功/总量)'].split('/')[0]\r\n        df_run.loc[x, '自己电话+微信总量'] = df.loc[x, '电话+微信量(成功/总量)'].split('/')[1]\r\n\r\n        df_run.loc[x, '电销+微信成功'] = df.loc[x, '电销+微信量(成功/总量)'].split('/')[0]\r\n        df_run.loc[x, '电销+微信总量'] = df.loc[x, '电销+微信量(成功/总量)'].split('/')[1]\r\n\r\n        df_run.loc[x, '不回'] = df.loc[x, '收单量(不回/不全/收全)'].split('/')[0]\r\n        df_run.loc[x, '不全'] = df.loc[x, '收单量(不回/不全/收全)'].split('/')[1]\r\n        df_run.loc[x, '收全'] = df.loc[x, '收单量(不回/不全/收全)'].split('/')[2]\r\n\r\n        df_run.loc[x, '审核通过'] = df.loc[x, '审核通过/拒绝'].split('/')[0]\r\n        df_run.loc[x, '审核拒绝'] = df.loc[x, '审核通过/拒绝'].split('/')[1]\r\n\r\n        df_run.loc[x, '新单到期'] = df.loc[x, '每日新单到期还款情况'].split('/')[0]\r\n        df_run.loc[x, '新单总量'] = df.loc[x, '每日新单到期还款情况'].split('/')[1]\r\n\r\n        df_run.loc[x, '老单到期'] = df.loc[x, '每日老单到期还款情况'].split('/')[0]\r\n        df_run.loc[x, '老单总量'] = df.loc[x, '每日老单到期还款情况'].split('/')[1]\r\n\r\n        df_run.loc[x, '续借客户'] = df.loc[x, '续借率'].split('/')[0]\r\n        df_run.loc[x, '续借总量'] = df.loc[x, 
'续借率'].split('/')[1]\r\n\r\n    df_run[['电话量', '昨日到期客户数', '昨日逾期客户数', '续期客户量', '持单总量']] = df_run[\r\n        ['电话量', '昨日到期客户数', '昨日逾期客户数', '续期客户量', '持单总量']].apply(pd.to_numeric)\r\n\r\n    df_run[['自己电话+微信成功', '自己电话+微信总量', '电销+微信成功', '电销+微信总量', '不回', '不全', '收全', '审核通过', '审核拒绝', '新单到期', '新单总量', '老单到期',\r\n            '老单总量', '续借客户', '续借总量']] = \\\r\n        df_run[\r\n            ['自己电话+微信成功', '自己电话+微信总量', '电销+微信成功', '电销+微信总量', '不回', '不全', '收全', '审核通过', '审核拒绝', '新单到期', '新单总量', '老单到期',\r\n             '老单总量', '续借客户', '续借总量']].apply(pd.to_numeric)\r\n\r\n    i=40\r\n    for col in df_run.columns:\r\n        df_run.loc[i + 1, col] = df_run[col].sum()\r\n\r\n    df.loc[i, '电话+微信量(成功/总量)'] = str(int(df_run.loc[i+1]['自己电话+微信成功'])) + '/' + str(int(df_run.loc[i+1]['自己电话+微信总量']))\r\n    # df.loc[i+1, '电话+微信量(成功/总量)'] = str(round((df_run.loc[i+1]['自己电话+微信成功'] / df_run.loc[i+1]['自己电话+微信总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '电销+微信量(成功/总量)'] = str(int(df_run.loc[i+1]['电销+微信成功'])) + '/' + str(int(df_run.loc[i+1]['电销+微信总量']))\r\n    # df.loc[i+1, '电销+微信量(成功/总量)'] = str(round((df_run.loc[i+1]['电销+微信成功'] / df_run.loc[i+1]['电销+微信总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '收单量(不回/不全/收全)'] = str(int(df_run.loc[i+1]['不回'])) + '/' + str(int(df_run.loc[i+1]['不全'])) + '/' + str(\r\n        int(df_run.loc[i+1]['收全']))\r\n    # df.loc[i+1, '收单量(不回/不全/收全)'] = str(\r\n    #     round(df_run.loc[i+1]['收全'] / (df_run.loc[i+1]['不回'] + df_run.loc[i+1]['不全'] + df_run.loc[i+1]['收全']) * 100,\r\n    #           2)) + '%'\r\n\r\n    df.loc[i, '审核通过/拒绝'] = str(int(df_run.loc[i+1]['审核通过'])) + '/' + str(int(df_run.loc[i+1]['审核拒绝']))\r\n    # df.loc[i+1, '审核通过/拒绝'] = str(\r\n    #     round(df_run.loc[i+1]['审核通过'] / (df_run.loc[i+1]['审核通过'] + df_run.loc[i+1]['审核拒绝']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '每日新单到期还款情况'] = str(int(df_run.loc[i+1]['新单到期'])) + '/' + str(int(df_run.loc[i+1]['新单总量']))\r\n    # df.loc[i+1, '每日新单到期还款情况'] = str(round((df_run.loc[i+1]['新单到期'] / df_run.loc[i+1]['新单总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '每日老单到期还款情况'] = str(int(df_run.loc[i+1]['老单到期'])) + '/' + str(int(df_run.loc[i+1]['老单总量']))\r\n    # df.loc[i+1, '每日老单到期还款情况'] = str(round((df_run.loc[i+1]['老单到期'] / df_run.loc[i+1]['老单总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '续借率'] = str(int(df_run.loc[i+1]['续借客户'])) + '/' + str(int(df_run.loc[i+1]['续借总量']))\r\n    # df.loc[i+1, '续借率'] = str(round((df_run.loc[i+1]['续借客户'] / df_run.loc[i+1]['续借总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[i, '电话量'] = df_run.loc[i+1]['电话量']\r\n    df.loc[i, '昨日到期客户数'] = df_run.loc[i+1]['昨日到期客户数']\r\n    df.loc[i, '昨日逾期客户数'] = df_run.loc[i+1]['昨日逾期客户数']\r\n    df.loc[i, '续期客户量'] = df_run.loc[i+1]['续期客户量']\r\n    df.loc[i, '持单总量'] = df_run.loc[i+1]['持单总量']\r\n\r\n    df.loc[i, '审核员姓名'] = df.loc[0, '审核员姓名']\r\n\r\n    df = df.reset_index(drop=True)\r\n    # print(df)\r\n    # exit()\r\n\r\n    return df\r\n\r\n\r\ndef zong_run(df):\r\n\r\n    df_check = pd.DataFrame()\r\n\r\n    df_run = pd.DataFrame()\r\n    for x in range(0, df.shape[0]):\r\n        df_check.loc[x, '审核员姓名'] = df.loc[x, '审核员姓名']\r\n        df_run.loc[x, '电话量'] = df.loc[x, '电话量']\r\n        df_run.loc[x, '昨日到期客户数'] = df.loc[x, '昨日到期客户数']\r\n        df_run.loc[x, '昨日逾期客户数'] = df.loc[x, '昨日逾期客户数']\r\n        df_run.loc[x, '续期客户量'] = df.loc[x, '续期客户量']\r\n        df_run.loc[x, '持单总量'] = df.loc[x, '持单总量']\r\n\r\n        df_run.loc[x, '自己电话+微信成功'] = df.loc[x, '电话+微信量(成功/总量)'].split('/')[0]\r\n        df_run.loc[x, '自己电话+微信总量'] = df.loc[x, '电话+微信量(成功/总量)'].split('/')[1]\r\n\r\n        df_run.loc[x, '电销+微信成功'] = df.loc[x, '电销+微信量(成功/总量)'].split('/')[0]\r\n        df_run.loc[x, '电销+微信总量'] = df.loc[x, '电销+微信量(成功/总量)'].split('/')[1]\r\n\r\n        df_run.loc[x, '不回'] = df.loc[x, '收单量(不回/不全/收全)'].split('/')[0]\r\n        df_run.loc[x, '不全'] = df.loc[x, 
'收单量(不回/不全/收全)'].split('/')[1]\r\n df_run.loc[x, '收全'] = df.loc[x, '收单量(不回/不全/收全)'].split('/')[2]\r\n\r\n df_run.loc[x, '审核通过'] = df.loc[x, '审核通过/拒绝'].split('/')[0]\r\n df_run.loc[x, '审核拒绝'] = df.loc[x, '审核通过/拒绝'].split('/')[1]\r\n\r\n df_run.loc[x, '新单到期'] = df.loc[x, '每日新单到期还款情况'].split('/')[0]\r\n df_run.loc[x, '新单总量'] = df.loc[x, '每日新单到期还款情况'].split('/')[1]\r\n\r\n df_run.loc[x, '老单到期'] = df.loc[x, '每日老单到期还款情况'].split('/')[0]\r\n df_run.loc[x, '老单总量'] = df.loc[x, '每日老单到期还款情况'].split('/')[1]\r\n\r\n df_run.loc[x, '续借客户'] = df.loc[x, '续借率'].split('/')[0]\r\n df_run.loc[x, '续借总量'] = df.loc[x, '续借率'].split('/')[1]\r\n\r\n df_run[['电话量', '昨日到期客户数', '昨日逾期客户数', '续期客户量', '持单总量']] = df_run[\r\n ['电话量', '昨日到期客户数', '昨日逾期客户数', '续期客户量', '持单总量']].apply(pd.to_numeric)\r\n\r\n df_run[['自己电话+微信成功', '自己电话+微信总量', '电销+微信成功', '电销+微信总量', '不回', '不全', '收全', '审核通过', '审核拒绝', '新单到期', '新单总量', '老单到期',\r\n '老单总量', '续借客户', '续借总量']] = \\\r\n df_run[\r\n ['自己电话+微信成功', '自己电话+微信总量', '电销+微信成功', '电销+微信总量', '不回', '不全', '收全', '审核通过', '审核拒绝', '新单到期', '新单总量', '老单到期',\r\n '老单总量', '续借客户', '续借总量']].apply(pd.to_numeric)\r\n\r\n def pan(x):\r\n if x == True:\r\n x = ''\r\n return x\r\n\r\n df_check['电话量填在微信'] = (df_run['自己电话+微信总量'] <= 400).apply(lambda x: pan(x))\r\n df_check['电话+微信检测'] = (df_run['自己电话+微信成功'] <= df_run['自己电话+微信总量']).apply(lambda x: pan(x))\r\n df_check['电销+微信检测'] = (df_run['电销+微信成功'] <= df_run['电销+微信总量']).apply(lambda x: pan(x))\r\n df_check['收全检测'] = (df_run['收全'] == df_run['审核通过'] + df_run['审核拒绝']).apply(lambda x: pan(x))\r\n df_check['新单到期检测'] = (df_run['新单到期'] <= df_run['新单总量']).apply(lambda x: pan(x))\r\n df_check['老单到期检测'] = (df_run['老单到期'] <= df_run['老单总量']).apply(lambda x: pan(x))\r\n df_check['续借检测'] = (df_run['续借客户'] <= df_run['续借总量']).apply(lambda x: pan(x))\r\n\r\n # 风控检测结果\r\n print(df_check)\r\n # df_check.to_excel('C:\\\\Users\\Administrator\\Desktop\\风控部门错误标记-周.xlsx')\r\n\r\n for col in df_run.columns:\r\n df_run.loc[20 + 1, col] = df_run[col].sum()\r\n\r\n df.loc[20, '电话+微信量(成功/总量)'] = str(int(df_run.loc[21]['自己电话+微信成功'])) + '/' + str(int(df_run.loc[21]['自己电话+微信总量']))\r\n df.loc[21, '电话+微信量(成功/总量)'] = str(round((df_run.loc[21]['自己电话+微信成功'] / df_run.loc[21]['自己电话+微信总量']) * 100, 2)) + '%'\r\n\r\n df.loc[20, '电销+微信量(成功/总量)'] = str(int(df_run.loc[21]['电销+微信成功'])) + '/' + str(int(df_run.loc[21]['电销+微信总量']))\r\n df.loc[21, '电销+微信量(成功/总量)'] = str(round((df_run.loc[21]['电销+微信成功'] / df_run.loc[21]['电销+微信总量']) * 100, 2)) + '%'\r\n\r\n df.loc[20, '收单量(不回/不全/收全)'] = str(int(df_run.loc[21]['不回'])) + '/' + str(int(df_run.loc[21]['不全'])) + '/' + str(\r\n int(df_run.loc[21]['收全']))\r\n df.loc[21, '收单量(不回/不全/收全)'] = str(\r\n round(df_run.loc[21]['收全'] / (df_run.loc[21]['不回'] + df_run.loc[21]['不全'] + df_run.loc[21]['收全']) * 100,\r\n 2)) + '%'\r\n\r\n df.loc[20, '审核通过/拒绝'] = str(int(df_run.loc[21]['审核通过'])) + '/' + str(int(df_run.loc[21]['审核拒绝']))\r\n df.loc[21, '审核通过/拒绝'] = str(\r\n round(df_run.loc[21]['审核通过'] / (df_run.loc[21]['审核通过'] + df_run.loc[21]['审核拒绝']) * 100, 2)) + '%'\r\n\r\n df.loc[20, '每日新单到期还款情况'] = str(int(df_run.loc[21]['新单到期'])) + '/' + str(int(df_run.loc[21]['新单总量']))\r\n df.loc[21, '每日新单到期还款情况'] = str(round((df_run.loc[21]['新单到期'] / df_run.loc[21]['新单总量']) * 100, 2)) + '%'\r\n\r\n df.loc[20, '每日老单到期还款情况'] = str(int(df_run.loc[21]['老单到期'])) + '/' + str(int(df_run.loc[21]['老单总量']))\r\n df.loc[21, '每日老单到期还款情况'] = str(round((df_run.loc[21]['老单到期'] / df_run.loc[21]['老单总量']) * 100, 2)) + '%'\r\n\r\n df.loc[20, '续借率'] = str(int(df_run.loc[21]['续借客户'])) + '/' + str(int(df_run.loc[21]['续借总量']))\r\n 
df.loc[21, '续借率'] = str(round((df_run.loc[21]['续借客户'] / df_run.loc[21]['续借总量']) * 100, 2)) + '%'\r\n\r\n    df.loc[20, '电话量'] = df_run.loc[21]['电话量']\r\n    df.loc[20, '昨日到期客户数'] = df_run.loc[21]['昨日到期客户数']\r\n    df.loc[20, '昨日逾期客户数'] = df_run.loc[21]['昨日逾期客户数']\r\n    df.loc[20, '续期客户量'] = df_run.loc[21]['续期客户量']\r\n    df.loc[20, '持单总量'] = df_run.loc[21]['持单总量']\r\n\r\n    df.loc[20, '审核员姓名'] = '数据汇总'\r\n    df.loc[21, '审核员姓名'] = '成功比率'\r\n\r\n    # 汇总新增字段\r\n    i = 20\r\n    df_all = df.reset_index(drop=True)\r\n\r\n    len_new = df_all.shape[0] - 1\r\n    len_dx = df_all.shape[0]\r\n\r\n    df_all.loc[len_dx + i + 1, '审核员姓名'] = ''\r\n    df_all.loc[len_dx + i + 2, '审核员姓名'] = ''\r\n    df_all.loc[len_dx + i + 3, '审核员姓名'] = 'Flag'\r\n    df_all.loc[len_dx + i + 3, '电话量'] = '风控汇总'\r\n    df_all.loc[len_dx + i + 3, '电话+微信量(成功/总量)'] = '成功比例'\r\n    # print(df_all)\r\n    # exit()\r\n\r\n    k = 4\r\n    for col in df_all.columns[1:]:\r\n        df_all.loc[len_dx + i + k, '审核员姓名'] = col\r\n        df_all.loc[len_dx + i + k, '电话量'] = df_all.loc[len_new-1, col]\r\n        df_all.loc[len_dx + i + k, '电话+微信量(成功/总量)'] = df_all.loc[len_new, col]\r\n        k += 1\r\n\r\n    return df_all\r\n\r\n\r\n\r\nimport pandas as pd\r\nimport os\r\npd.set_option('expand_frame_repr', False)  # 当列太多时不换行\r\npd.set_option('display.max_rows', 1000)\r\n\r\ndef Statistic():\r\n\r\n    file1='C:\\\\Users\\Administrator\\Desktop\\Feedback_Data.xlsx'\r\n    file2='C:\\\\Users\\Administrator\\Desktop\\Week_Data.xlsx'\r\n\r\n    list1 = [file1, file2]\r\n\r\n    for no, i in enumerate(list1):\r\n        if os.path.exists(i):\r\n            df = pd.read_excel(i)\r\n\r\n            # 统计接通率/需求率\r\n            i = 0\r\n            df1 = pd.DataFrame()\r\n            for x, y in df.groupby(['资源来源']):\r\n                df1.loc[i, '资源名称'] = x\r\n                df1.loc[i, '电话数量'] = y['资源来源'].count()\r\n                df1.loc[i, '接通率'] = str(int((y[y['接通率'] == '接通']['接通率'].count() / y['接通率'].count()) * 100))+'%'\r\n                df1.loc[i, '需求率'] = str(int((y[y['需求率'] == '需要']['需求率'].count() / y['需求率'].count()) * 100))+'%'\r\n                i += 1\r\n\r\n            df1 = df1.reset_index(drop=True)\r\n            df1.loc[30, '资源名称'] = '电话总量'\r\n            df1.loc[30, '电话数量'] = df1['电话数量'].sum()\r\n\r\n            if no==0:\r\n                df1.to_excel('C:\\\\Users\\Administrator\\Desktop\\\\每日资源数据统计.xlsx',index=False)\r\n            elif no==1:\r\n                df1.to_excel('C:\\\\Users\\Administrator\\Desktop\\\\每周资源数据统计.xlsx', index=False)", "sub_path": "Week_Report/Func.py", "file_name": "Func.py", "file_ext": "py", "file_size_in_byte": 15488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.set_option", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pandas.to_numeric", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pandas.to_numeric", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pandas.set_option", "line_number": 241, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 252, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 253, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "459373125", "text": "from recon.core.module import BaseModule\nfrom lxml import etree, builder\nimport os\n\nclass Module(BaseModule):\n\n meta = {\n 'name': 'XML nmap report',\n 'author': 's0i37',\n 'version': 'v0.0.1',\n 'description': 'Creates a XML nmap report. For import results in metasploit, ncrack and others',\n 'options': (\n ('filename', os.path.join(BaseModule.workspace, 'results.xml'), True, 'path and filename for report output'),\n ),\n }\n\n def module_run(self):\n filename = self.options['filename']\n xml = builder.ElementMaker()\n with open(filename, 'wb') as o:\n xml_nmap = xml.nmaprun()\n for ip_address,_ in self.query(\"SELECT ip_address,1 FROM ports\"):\n xml_status = xml.status( {'state':'up', 'reason':'user-set', 'reason_ttl':'0'} )\n xml_address = xml.address( {'addr':ip_address, 'addrtype':'ipv4'} )\n xml_hostnames = xml.hostnames()\n for domain,_ in self.query(\"SELECT host,1 FROM hosts where ip_address='%s'\" % ip_address):\n if domain:\n xml_hostnames.insert(0, xml.hostname( {'name':domain, 'type':'PTR'} ) ) \n xml_ports = xml.ports()\n for port,protocol in self.query(\"SELECT port,protocol from ports where ip_address='%s'\" % ip_address):\n xml_ports.insert( 0, xml.port( {'protocol':'tcp', 'portid':port}, xml.state( {'state':'open'} ), xml.service( {'name': protocol if not protocol is None else ''} ) ) )\n \n xml_nmap.insert( 0,\n xml.host( xml_status, xml_address, xml_hostnames, xml_ports )\n )\n\n o.write( etree.tostring(xml_nmap, xml_declaration=True) )\n self.output('ports data written to \\'%s\\'.' % filename)\n", "sub_path": "reports/xml_nmap.py", "file_name": "xml_nmap.py", "file_ext": "py", "file_size_in_byte": 1864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "recon.core.module.BaseModule", "line_number": 5, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "recon.core.module.BaseModule.workspace", "line_number": 13, "usage_type": "attribute"}, {"api_name": "recon.core.module.BaseModule", "line_number": 13, "usage_type": "name"}, {"api_name": "lxml.builder.ElementMaker", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.builder", "line_number": 19, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 37, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "308366283", "text": "# coding=utf-8\nimport time\nfrom threading import *\nimport wx\nimport os\nimport socket\n'''\nhost = '192.168.0.39'\nport = 12345\nsk = socket.socket()\nsk.connect((host, port))\n'''\n\ndate_time_program_start = time.localtime()\n\n\ndef get_all_data_dirs(data_dir_root):\n\n list_files_dir = []\n for dirpath, dirnames, filenames in os.walk(data_dir_root):\n for filepath in filenames:\n list_files_dir.append(os.path.join(dirpath, filepath))\n\n return list_files_dir\n\n\ndef get_state(num):\n\n if num % 2 == 0:\n return \"ON\"\n else:\n return \"OFF\"\n\n\ndata_dir_root = '/home/wang/下载/wsuk1/newData'\nall_data_dirs = get_all_data_dirs(data_dir_root)\nprint(len(all_data_dirs))\n\n\ndict_device_motion = {}\ndict_device_temperate = {}\n\n\n# list_button_temp_lists = [wx.NewId() for 
keys in dict_device_temperate]\nEVT_RESULT_ID = wx.NewId()\nEVT_STATE_ID = wx.NewId()\n\n# Button definitions\nID_START = wx.NewId()\nID_STOP = wx.NewId()\nID_BUTTON = wx.NewId()\n\ndict_device = \\\n {\n \"M047\": (160, 120), \"M046\": (120, 310), \"M045\": (160, 560), \"L001\": (110, 710), \"M027\": (140, 1080),\n \"L003\": (290, 1050), \"L004\": (330, 1880), \"M031\": (370, 1350), \"T003\": (370, 1380), \"M032\": (370, 1570),\n \"M033\": (370, 1790), \"M048\": (460, 120), \"M049\": (460, 310), \"T004\": (460, 350), \"M050\": (460, 560),\n \"M043\": (460, 900), \"M028\": (460, 1080), \"M044\": (490, 740), \"D004\": (520, 790), \"M036\": (650, 1360),\n \"M035\": (650, 1570), \"M034\": (650, 1790), \"E002\": (670, 610), \"T005\": (670, 700), \"D003\": (690, 1180),\n \"M042\": (740, 900), \"M029\": (740, 1080), \"M030\": (740, 1220), \"M037\": (900, 1080), \"D005\": (900, 1160),\n \"M038\": (960, 1270), \"M039\": (960, 1520), \"L006\": (960, 1600), \"M040\": (960, 1740), \"M041\": (960, 1820),\n \"L007\": (960, 1880), \"D006\": (990, 1780), \"F001\": (1120, 1590), \"F002\": (1120, 1870), \"L002\": (1130, 710),\n \"L005\": (1160, 1250),\n \"R002\": (1380, 630), \"I001\": (1390, 840), \"M025\": (1420, 1830), \"I012\": (1430, 870), \"M026\": (1430, 1600),\n \"D013\": (1440, 790), \"M004\": (1460, 160), \"M003\": (1460, 430), \"M002\": (1460, 670), \"L009\": (1530, 1770),\n \"D001\": (1570, 1970), \"M001\": (1690, 920), \"M023\": (1690, 1010), \"M022\": (1690, 1270), \"M021\": (1690, 1530),\n \"M024\": (1690, 1830), \"M005\": (1750, 160), \"M006\": (1750, 430), \"T001\": (1750, 430), \"M007\": (1750, 670),\n \"D012\": (1840, 1070), \"L011\": (1950, 1780), \"M020\": (1960, 1740), \"M019\": (1970, 1530), \"M008\": (1980, 780),\n \"I008\": (2010, 510), \"I009\": (2010, 580), \"M011\": (2050, 160), \"M010\": (2050, 430), \"M009\": (2050, 670),\n \"R001\": (2050, 1370), \"D008\": (2060, 1030), \"D009\": (2060, 1090), \"D010\": (2060, 1220), \"E001\": (2160, 200),\n \"D002\": (2220, 20), \"L010\": (2260, 1220), \"M051\": (2290, 1770), \"M015\": (2300, 930), \"M016\": (2300, 1010),\n \"M017\": (2300, 1270), \"T002\": (2300, 1320), \"M018\": (2300, 1530), \"M012\": (2370, 160), \"M013\": (2370, 430),\n \"M014\": (2370, 670), \"D011\": (2450, 1760), \"I007\": (2490, 130), \"D016\": (2510, 1050), \"D017\": (2510, 1300),\n \"D014\": (2550, 1440), \"D015\": (2550, 1540),\n \"L008\": (1950, 2.8)\n }\n\n\nlist_button_lists = [wx.NewId() for keys in dict_device]\n\n\n# Define notification event for thread completion\nlist_button_motion_lists = []\nlist_button_motion_states = []\nfor keys in dict_device:\n dict_device_motion[keys] = wx.NewId()\n list_button_motion_lists.append(dict_device_motion[keys])\n list_button_motion_states.append(wx.NewId())\n\ndict_device_motion_inverse = dict([val,key] for key,val in dict_device_motion.items())\n\ndict_motions_pairs = {}\nfor i in range (len(list_button_motion_lists)):\n dict_motions_pairs[list_button_motion_lists[i]] = list_button_motion_states[i]\n\nprint(dict_device_motion)\nprint(dict_motions_pairs)\n\n# list_button_temp_lists = [wx.NewId() for keys in dict_device_temperate]\nEVT_RESULT_ID = wx.NewId()\nEVT_STATE_ID = wx.NewId()\n\n\ndef EVT_RESULT(win, func):\n \"\"\"Define Result Event.\"\"\"\n win.Connect(-1, -1, EVT_RESULT_ID, func)\n\n\ndef EVT_STATE(win, func):\n \"\"\"Define Result Event.\"\"\"\n win.Connect(-1, -1, EVT_STATE_ID, func)\n\n\ndef EVT_MOTION_STATE(win, func, motionID):\n \"\"\"Define Motion State Event.\"\"\"\n win.Connect(-1, -1, 
dict_motions_pairs[motionID], func)\n\n\nclass ResultEvent(wx.PyEvent):\n    \"\"\"Simple event to carry arbitrary result data.\"\"\"\n    def __init__(self, data):\n        \"\"\"Init Result Event.\"\"\"\n        wx.PyEvent.__init__(self)\n        self.SetEventType(EVT_RESULT_ID)\n        self.data = data\n\n\nclass StateEvent(wx.PyEvent):\n    \"\"\"Simple event to carry state data of a button.\"\"\"\n    def __init__(self, data):\n        \"\"\"Init State Event.\"\"\"\n        wx.PyEvent.__init__(self)\n        self.SetEventType(EVT_STATE_ID)\n        self.data = data\n\n\nclass MotionStateEvent(wx.PyEvent):\n    \"\"\"Simple event to carry state data of a Motion button.\"\"\"\n    def __init__(self, Motion_ID, data):\n        \"\"\"Init Motion State Event.\"\"\"\n        wx.PyEvent.__init__(self)\n        self.SetEventType(dict_motions_pairs[Motion_ID])\n        self.data = data\n\n\nclass ButtonFrame(wx.Frame):\n\n    def __init__(self, data):\n        wx.Frame.__init__(self, None)\n        self.button = wx.Button(self, -1, data)  # create the button before binding; it did not exist previously\n        self.Bind(wx.EVT_BUTTON, self.OnClick, self.button)\n\n    def OnClick(self, event):\n\n        if self.button.GetLabel() == \"ON\":\n            self.button.SetLabel(\"OFF\")\n        else:\n            self.button.SetLabel(\"ON\")\n\n    def autoClick(self, value):\n\n        self.button.SetLabel(value)\n\n\nclass ToggleButtonFrame(wx.Frame):\n\n    def __init__(self, data):\n        wx.Frame.__init__(self, None)\n        self.ToggleButton = wx.ToggleButton(self, -1, data)  # create the toggle button before binding\n        self.Bind(wx.EVT_BUTTON, self.OnClick, self.ToggleButton)\n\n    def OnClick(self, event):\n\n        if self.ToggleButton.GetValue():  # GetValue() returns a bool, not a string\n            self.ToggleButton.SetValue(False)\n        else:\n            self.ToggleButton.SetValue(True)\n\n    def autoClick(self, value):\n\n        self.ToggleButton.SetValue(value)\n\n\n# Thread class that executes processing\nclass WorkerThread(Thread):\n    \"\"\"Worker Thread Class.\"\"\"\n    def __init__(self, notify_window):\n        \"\"\"Init Worker Thread Class.\"\"\"\n        Thread.__init__(self)\n        self._notify_window = notify_window\n        self._want_abort = 0\n        # This starts the thread running on creation, but you could\n        # also make the GUI thread responsible for calling this\n        self.start()\n\n    def run(self):\n        \"\"\"Run Worker Thread.\"\"\"\n        # This is the code executing in the new thread. 
Simulation of\n        # a long process (well, 10s here) as a simple loop - you will\n        # need to structure your processing so that you periodically\n        # peek at the abort variable\n        for i in range(10):\n            time.sleep(0.11)\n            wx.PostEvent(self._notify_window, StateEvent(get_state(i)))\n\n            if self._want_abort:\n                # Use a result of None to acknowledge the abort (of\n                # course you can use whatever you'd like or even\n                # a separate event type)\n                wx.PostEvent(self._notify_window, ResultEvent(None))\n                return\n        # Here's where the result would be returned (this is an\n        # example fixed result of the number 10, but it could be\n        # any Python object)\n        wx.PostEvent(self._notify_window, ResultEvent(10))\n        with open(\"/home/wang/下载/wsuk1/newData/final_input\", 'r') as f:\n            start_time = 0\n            for line in f:\n                line = line.strip()\n                list_state = line.split('\\t')\n                if len(list_state) < 3:\n                    continue\n                event_time = list_state[0]\n                event_device = list_state[1]\n                event_state = list_state[2]\n                if event_device in dict_device_motion:\n                    if not start_time:\n                        pass\n                    else:\n                        time.sleep(time.mktime(time.strptime(event_time[:-7],\n                                                             \"%Y-%m-%d %H:%M:%S\")) + float(event_time[-7:]) - start_time)\n                        print(time.mktime(time.strptime(event_time[:-7],\n                                                        \"%Y-%m-%d %H:%M:%S\")) + float(event_time[-7:]))\n                    start_time = time.mktime(time.strptime(event_time[:-7],\n                                                           \"%Y-%m-%d %H:%M:%S\")) + float(event_time[-7:])\n\n                    data = event_time + ' ' + event_device + ' ' + event_state + '\\n'\n\n                    # sk.send(bytes(data))\n                    print(\"%s %s %s\\n\" % (event_time, event_device, event_state))\n                    wx.PostEvent(self._notify_window, MotionStateEvent(dict_device_motion[event_device],\n                                                                       (event_device, event_state)))\n                else:\n                    pass\n\n        # sk.close()  # the socket 'sk' is commented out at the top of the file, so closing it would raise NameError\n\n    def abort(self):\n        \"\"\"abort worker thread.\"\"\"\n        # Method for use by main thread to signal an abort\n        self._want_abort = 1\n\n\n# GUI Frame class that spins off the worker thread\nclass MainFrame(wx.Frame):\n    \"\"\"Class MainFrame.\"\"\"\n    def __init__(self, parent):\n        \"\"\"Create the MainFrame.\"\"\"\n        wx.Frame.__init__(self, parent, id=-1, title='Final Thread Test', size=(3000,2000))\n\n        panel = wx.Panel(self)\n        # button1 = wx.Button(self, id=wx.NewId(), label=u'确认', pos=(30, 150))\n        # button2 = wx.Button(self, id=wx.NewId(), label=u'取消', pos=(180, 150))\n        self.DateTimeInput = wx.TextCtrl(self, wx.NewId(), \"2007-10-25 15:04:24\", (0, 230), (175, -1))\n\n        self.begin_time = self.DateTimeInput.GetValue()\n        self.states = []\n        self.get_dict = {}\n        # Dumb sample frame with two buttons\n        i = 0\n        self.dict_motions_index = {}\n        for keys in dict_device:\n            pos_x = dict_device[keys][0] * 0.65\n            pos_y = dict_device[keys][1] / 2\n            if(keys.startswith('L')):\n                button = wx.Button(self, list_button_motion_lists[i], str(keys), pos=(pos_x, pos_y), size=(50, 20))\n                button.SetBackgroundColour(\"#FFCC66\") # 设置按钮的背景颜色\n            else:\n                wx.Button(self, list_button_motion_lists[i], str(keys), pos=(pos_x, pos_y), size=(50, 20))\n            self.states.append(wx.Button(self, list_button_motion_states[i], 'Initial'\n                                         , pos=(pos_x + 50, pos_y), size=(50, 20)))\n\n            self.get_dict[list_button_motion_lists[i]] = self.states[i]\n\n            self.dict_motions_index[keys] = i\n            # wx.Button(self, list_button_motion_states[i], 'START', pos=(100 * (i % 12), 50+int(i / 12) * 100))\n            # dict_motions_pairs[list_button_motion_lists[i]] = list_button_motion_states[i]\n            i = i + 1\n\n        wx.Button(self, ID_START, 'Start', pos=(0, 300))\n        wx.Button(self, ID_STOP, 'Stop', pos=(0, 400))\n        wx.Button(self, ID_BUTTON, 'Initial', pos=(0, 500))\n\n\n        self.status = wx.StaticText(self, -1, '', 
pos=(0, 600))\n\n        self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\n        self.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\n        self.Bind(wx.EVT_BUTTON, self.OnState, id=ID_BUTTON)\n\n        for motions in dict_motions_pairs:\n            self.Bind(wx.EVT_BUTTON, self.OnClick, id=motions)\n\n        for motions in dict_motions_pairs:\n            self.Bind(wx.EVT_BUTTON, self.OnMotion, id=dict_motions_pairs[motions])\n            EVT_MOTION_STATE(self, self.OnMotion, motions)\n            # dict_motions_pairs[motions](self, self.OnState)\n\n        # Set up event handler for any worker thread results\n        EVT_RESULT(self, self.OnResult)\n        EVT_STATE(self, self.OnState)\n\n        self.worker = None\n\n    def CloseMe(self, evt):\n\n        UserName = self.Username.GetValue()\n        PassWord = self.Password.GetValue()\n        date_time_start = self.DateTimeInput.GetValue()\n        print(date_time_start)\n        if (UserName == 'demo') and (PassWord =='demo'):\n            self.Destroy()\n\n    def OnStart(self, event):\n\n        \"\"\"Start Computation.\"\"\"\n        # Trigger the worker thread unless it's already busy\n        if not self.worker:\n            self.status.SetLabel('Starting computation')\n            self.worker = WorkerThread(self)\n\n    def OnStop(self, event):\n        \"\"\"Stop Computation.\"\"\"\n        # Flag the worker thread to stop if running\n        if self.worker:\n            self.status.SetLabel('Trying to abort computation')\n            self.worker.abort()\n\n    def OnResult(self, event):\n        \"\"\"Show Result status.\"\"\"\n        if event.data is None:\n            # Thread aborted (using our convention of None return)\n            self.status.SetLabel('Computation aborted')\n        else:\n            # Process results here\n            self.status.SetLabel('Computation Result: %s And now start! ' % event.data)\n        # In either event, the worker is done\n        self.worker = None\n\n    def OnState(self, event):\n        \"\"\"Show State status.\"\"\"\n        if event.data == \"ON\":  # compare string values with ==, not 'is'\n            # The reported state is ON\n            self.status.SetLabel('ON')\n        else:\n            # The reported state is OFF\n            self.status.SetLabel('OFF')\n        # In either event, the worker is done\n        self.worker = None\n\n    def OnMotion(self, event):\n\n        # dict_motions_pairs[button_id]\n        self.states[self.dict_motions_index[event.data[0]]].SetLabel(event.data[1])\n        self.worker = None\n\n\n    def OnClick(self, event):\n\n        Test_Button = self.get_dict[event.GetId()]\n        button_name = dict_device_motion_inverse[event.GetId()]\n        self.begin_time = self.DateTimeInput.GetValue()\n        with open('/home/wang/ActivityRecognitionInSmartHome-master/click_data/click_test', 'a+') as fw:\n\n            the_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",\n\n                                     time.localtime(time.mktime(time.localtime()) -\n                                                    time.mktime(date_time_program_start)\n                                                    + time.mktime(time.strptime(self.begin_time, \"%Y-%m-%d %H:%M:%S\")) ))\n\n            if Test_Button.GetLabel() == \"ON\":\n\n                Test_Button.SetLabel(\"OFF\")\n\n                fw.write(\"%s\\t%s\\t%s\\n \" % (button_name, 'OFF', the_time))\n\n            else:\n\n                Test_Button.SetLabel(\"ON\")\n                # the_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n                fw.write(\"%s\\t%s\\t%s\\n \" % (button_name, 'ON', the_time))\n\n        print (button_name)\n\n\nclass MainApp(wx.App):\n    \"\"\"Class Main App.\"\"\"\n    def OnInit(self):\n        \"\"\"Init Main App.\"\"\"\n        self.frame = MainFrame(None)\n        self.frame.Show(True)\n        self.SetTopWindow(self.frame)\n        return True\n\n\nif __name__ == '__main__':\n\n    app = MainApp(0)\n    app.MainLoop()\n\n", "sub_path": "positon_simulate.py", "file_name": "positon_simulate.py", "file_ext": "py", "file_size_in_byte": 14925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "time.localtime", "line_number": 14, 
"usage_type": "call"}, {"api_name": "os.walk", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "wx.NewId", "line_number": 45, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 46, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 49, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 50, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 51, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 79, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 86, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 88, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 100, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 101, "usage_type": "call"}, {"api_name": "wx.PyEvent", "line_number": 119, "usage_type": "attribute"}, {"api_name": "wx.PyEvent.__init__", "line_number": 123, "usage_type": "call"}, {"api_name": "wx.PyEvent", "line_number": 123, "usage_type": "attribute"}, {"api_name": "wx.PyEvent", "line_number": 128, "usage_type": "attribute"}, {"api_name": "wx.PyEvent.__init__", "line_number": 132, "usage_type": "call"}, {"api_name": "wx.PyEvent", "line_number": 132, "usage_type": "attribute"}, {"api_name": "wx.PyEvent", "line_number": 137, "usage_type": "attribute"}, {"api_name": "wx.PyEvent.__init__", "line_number": 141, "usage_type": "call"}, {"api_name": "wx.PyEvent", "line_number": 141, "usage_type": "attribute"}, {"api_name": "wx.Frame", "line_number": 146, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 149, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 149, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 151, "usage_type": "attribute"}, {"api_name": "wx.Frame", "line_number": 165, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 168, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 168, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 170, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 203, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 204, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 210, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 215, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 230, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 230, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 230, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 232, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 232, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 234, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 234, "usage_type": "call"}, {"api_name": "wx.PostEvent", "line_number": 241, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 255, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 259, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 259, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 261, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 264, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 264, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 276, "usage_type": "call"}, 
{"api_name": "wx.Button", "line_number": 279, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 290, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 291, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 292, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 295, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 297, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 298, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 299, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 302, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 305, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 375, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 377, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 377, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 378, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 379, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 379, "usage_type": "call"}, {"api_name": "wx.App", "line_number": 396, "usage_type": "attribute"}]} +{"seq_id": "577754853", "text": "from flask import Blueprint\nfrom flask_restful import Api, reqparse, marshal, Resource, inputs\nfrom sqlalchemy import desc\nfrom . import *\nfrom .model import Rents\nfrom flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, get_jwt_claims\nfrom blueprints.User.model import Users\nfrom blueprints.Book.model import Books\nfrom blueprints.Client.model import Clients\n\nfrom blueprints import db,app\n\nbp_rent = Blueprint('rent', __name__)\napi = Api(bp_rent)\n\n\n################################################\n# USING RESTFUL-API # \n################################################\n\n\nclass RentResource(Resource):\n\n def __init__(self):\n pass\n\n @jwt_required\n def get(self, id):\n qry = Rents.query.get(id)\n\n claims = get_jwt_claims()\n if claims['isinternal'] == True:\n return {'status': 'FORBIDDEN', 'message': 'Non-Internal Only!'}, 403\n qry2 = Clients.query.filter_by(client_key = claims['client_key'])\n clientData = qry2.first()\n\n if qry is None or qry.deleted == True:\n return {'status':'NOT_FOUND'}, 404, {'Content-Type' : 'application/json' }\n marshalRents = marshal(qry, Rents.response_fields)\n\n qry_user = Users.query.get(marshalRents[\"user_id\"])\n marshalUser = marshal(qry_user, Users.response_fields)\n qry_book = Books.query.get(marshalRents[\"book_id\"])\n marshalBook = marshal(qry_book, Books.response_fields)\n\n marshalRents[\"user\"] = marshalUser\n marshalRents[\"book\"] = marshalBook\n \n if marshalRents[\"user\"][\"client_id\"] == clientData.id:\n return marshalRents, 200, {'Content-Type' : 'application/json' }\n\n @jwt_required\n def post(self):\n\n parser = reqparse.RequestParser()\n parser.add_argument('book_id', location = 'json', type = int, required = True)\n parser.add_argument('user_id', location = 'json', type = int, required = True)\n args = parser.parse_args()\n\n rent = Rents(args['book_id'], args['user_id'])\n db.session.add(rent)\n db.session.commit()\n\n app.logger.debug('DEBUG : %s', rent)\n\n return marshal(rent, Rents.response_fields), 200, {'Content-Type' : 'application/json' }\n\n @jwt_required\n def delete(self,id):\n qry = Rents.query.get(id)\n\n if qry is None:\n return {'status': 
'NOT_FOUND'}, 404\n        \n        # Hard Delete\n        # db.session.delete(qry)\n        # db.session.commit()\n\n        # Soft Delete\n        qry.deleted = True\n        db.session.commit()\n        return {'status':'Deleted'}, 200\n\n    @jwt_required\n    def patch(self):\n        return 'Not yet implemented', 501\n\nclass RentList(Resource):\n\n    def __init__(self):\n        pass\n    \n    @jwt_required\n    def get(self):\n        parser = reqparse.RequestParser()\n        parser.add_argument('p', location = 'args', type = int, default = 1)\n        parser.add_argument('rp', location = 'args', type = int, default = 25)\n        parser.add_argument('book_id', location = 'args')\n        parser.add_argument('user_id', location = 'args')\n        \n        args = parser.parse_args()\n\n        offset = (args['p'] * args['rp']) - args['rp']\n\n        qry = Rents.query\n\n        if args['book_id'] is not None:\n            qry = qry.filter_by(book_id = args['book_id'])\n\n        if args['user_id'] is not None:\n            qry = qry.filter_by(user_id = args['user_id'])\n        \n        result = []\n        for data in qry:\n            \n            claims = get_jwt_claims()\n            if claims['isinternal'] == True:\n                return {'status': 'FORBIDDEN', 'message': 'Non-Internal Only!'}, 403\n            qry2 = Clients.query.filter_by(client_key = claims['client_key'])\n            clientData = qry2.first()\n            \n            marshalRents = marshal(data, Rents.response_fields)\n\n            qry_user = Users.query.get(marshalRents[\"user_id\"])\n            marshalUser = marshal(qry_user, Users.response_fields)\n            qry_book = Books.query.get(marshalRents[\"book_id\"])\n            marshalBook = marshal(qry_book, Books.response_fields)\n\n            marshalRents[\"user\"] = marshalUser\n            marshalRents[\"book\"] = marshalBook\n\n            if marshalRents[\"user\"][\"client_id\"] == clientData.id:\n                result.append(marshalRents)\n\n        return result, 200, {'Content-Type' : 'application/json' }\n\napi.add_resource(RentList, '', '/list')\napi.add_resource(RentResource, '', '/<int:id>')", "sub_path": "Alta Batch 4/Phase 2/Week 1/Day 4/Challenge_5/blueprints/Rent/resource.py", "file_name": "resource.py", "file_ext": "py", "file_size_in_byte": 4505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "flask.Blueprint", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 22, "usage_type": "name"}, {"api_name": "model.Rents.query.get", "line_number": 29, "usage_type": "call"}, {"api_name": "model.Rents.query", "line_number": 29, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_jwt_extended.get_jwt_claims", "line_number": 31, "usage_type": "call"}, {"api_name": "blueprints.Client.model.Clients.query.filter_by", "line_number": 34, "usage_type": "call"}, {"api_name": "blueprints.Client.model.Clients.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "blueprints.Client.model.Clients", "line_number": 34, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 39, "usage_type": "call"}, {"api_name": "model.Rents.response_fields", "line_number": 39, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 39, "usage_type": "name"}, {"api_name": "blueprints.User.model.Users.query.get", "line_number": 41, "usage_type": "call"}, {"api_name": "blueprints.User.model.Users.query", "line_number": 41, "usage_type": "attribute"}, {"api_name": "blueprints.User.model.Users", "line_number": 41, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 42, "usage_type": "call"}, {"api_name": 
"blueprints.User.model.Users.response_fields", "line_number": 42, "usage_type": "attribute"}, {"api_name": "blueprints.User.model.Users", "line_number": 42, "usage_type": "name"}, {"api_name": "blueprints.Book.model.Books.query.get", "line_number": 43, "usage_type": "call"}, {"api_name": "blueprints.Book.model.Books.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "blueprints.Book.model.Books", "line_number": 43, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 44, "usage_type": "call"}, {"api_name": "blueprints.Book.model.Books.response_fields", "line_number": 44, "usage_type": "attribute"}, {"api_name": "blueprints.Book.model.Books", "line_number": 44, "usage_type": "name"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 55, "usage_type": "name"}, {"api_name": "model.Rents", "line_number": 60, "usage_type": "call"}, {"api_name": "blueprints.db.session.add", "line_number": 61, "usage_type": "call"}, {"api_name": "blueprints.db.session", "line_number": 61, "usage_type": "attribute"}, {"api_name": "blueprints.db", "line_number": 61, "usage_type": "name"}, {"api_name": "blueprints.db.session.commit", "line_number": 62, "usage_type": "call"}, {"api_name": "blueprints.db.session", "line_number": 62, "usage_type": "attribute"}, {"api_name": "blueprints.db", "line_number": 62, "usage_type": "name"}, {"api_name": "blueprints.app.logger.debug", "line_number": 64, "usage_type": "call"}, {"api_name": "blueprints.app.logger", "line_number": 64, "usage_type": "attribute"}, {"api_name": "blueprints.app", "line_number": 64, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 66, "usage_type": "call"}, {"api_name": "model.Rents.response_fields", "line_number": 66, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 66, "usage_type": "name"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 52, "usage_type": "name"}, {"api_name": "model.Rents.query.get", "line_number": 70, "usage_type": "call"}, {"api_name": "model.Rents.query", "line_number": 70, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 70, "usage_type": "name"}, {"api_name": "blueprints.db.session.commit", "line_number": 81, "usage_type": "call"}, {"api_name": "blueprints.db.session", "line_number": 81, "usage_type": "attribute"}, {"api_name": "blueprints.db", "line_number": 81, "usage_type": "name"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 68, "usage_type": "name"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 84, "usage_type": "name"}, {"api_name": "flask_restful.Resource", "line_number": 88, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 95, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 95, "usage_type": "name"}, {"api_name": "model.Rents.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 105, "usage_type": "name"}, {"api_name": "flask_jwt_extended.get_jwt_claims", "line_number": 116, "usage_type": "call"}, {"api_name": "blueprints.Client.model.Clients.query.filter_by", "line_number": 119, "usage_type": "call"}, {"api_name": "blueprints.Client.model.Clients.query", "line_number": 119, "usage_type": "attribute"}, {"api_name": "blueprints.Client.model.Clients", 
"line_number": 119, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 122, "usage_type": "call"}, {"api_name": "model.Rents.response_fields", "line_number": 122, "usage_type": "attribute"}, {"api_name": "model.Rents", "line_number": 122, "usage_type": "name"}, {"api_name": "blueprints.User.model.Users.query.get", "line_number": 124, "usage_type": "call"}, {"api_name": "blueprints.User.model.Users.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "blueprints.User.model.Users", "line_number": 124, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 125, "usage_type": "call"}, {"api_name": "blueprints.User.model.Users.response_fields", "line_number": 125, "usage_type": "attribute"}, {"api_name": "blueprints.User.model.Users", "line_number": 125, "usage_type": "name"}, {"api_name": "blueprints.Book.model.Books.query.get", "line_number": 126, "usage_type": "call"}, {"api_name": "blueprints.Book.model.Books.query", "line_number": 126, "usage_type": "attribute"}, {"api_name": "blueprints.Book.model.Books", "line_number": 126, "usage_type": "name"}, {"api_name": "flask_restful.marshal", "line_number": 127, "usage_type": "call"}, {"api_name": "blueprints.Book.model.Books.response_fields", "line_number": 127, "usage_type": "attribute"}, {"api_name": "blueprints.Book.model.Books", "line_number": 127, "usage_type": "name"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "505897277", "text": "import time\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport cv2\nfrom djitellopy import Tello\nfrom loguru import logger\n\nimport keyboard\nfrom controller import TelloController\n\n\nclass TelloFly:\n def __init__(self, tello: Tello, controller: TelloController) -> None:\n self.tello = tello\n self.controller = controller\n\n def fly(self, window: Tuple[int, int] = (360, 240), battery: bool = False) -> None:\n self.tello.streamon()\n while True:\n self.control()\n\n image = self.tello.get_frame_read().frame\n image = cv2.resize(image, window)\n\n cv2.imshow(\"Image\", image)\n cv2.waitKey(1)\n\n if keyboard.get_key(\"z\"):\n self.save_image(image)\n\n if battery:\n logger.info(f\"[INFO] Remaining battery: {self.tello.get_battery()}\")\n\n @staticmethod\n def save_image(image, images_dir: str = \"data/images\") -> None:\n images_dir.mkdir(parents=True, exist_ok=True)\n image_savepath = Path(images_dir, f\"{time.time()}.jpg\")\n cv2.imwrite(image_savepath, image)\n logger.info(f\"Image saved to {image_savepath}\")\n\n def control(self) -> None:\n values = self.controller.get_key_control(self.tello)\n self.tello.send_rc_control(values[0], values[1], values[2], values[3])\n time.sleep(0.05)\n", "sub_path": "2_object_detection/tello_fly.py", "file_name": "tello_fly.py", "file_ext": "py", "file_size_in_byte": 1365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "djitellopy.Tello", "line_number": 14, "usage_type": "name"}, {"api_name": "controller.TelloController", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 18, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 27, "usage_type": "call"}, {"api_name": "keyboard.get_key", "line_number": 29, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 33, 
"usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 39, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 40, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "334631676", "text": "import codecs\nimport os\nimport sys\nimport datetime\nimport tinify\ntinify.key = \"07kVTh8oGbNFNHn4Fb0XU-t_ncAlJkMq\"\n# tinify.proxy = \"http://user:pass@192.168.0.1:8080\"\nreload(sys)\nsys.setdefaultencoding('utf8')\nrootdir = os.getcwd()\ncompresswidth = 800\nthumbnailwidth = 256\n\ndef hexoCalling():\n print (\"\\n\\n--------------- Calling Hexo Contributor ---------------\\n\")\n os.chdir(rootdir)\n os.system('hexo clean')\n os.system('hexo generate')\n\ndef insertRealIndex():\n print (\"\\n\\n--------------- Insert Homepage ---------------\\n\")\n publicdir = rootdir + '/' + 'public'\n os.system('mv ' + publicdir + '/' + 'index.html ' + publicdir + '/' + 'blog.html')\n for root, dirs, files in os.walk(rootdir + '/' + 'HomePage'):\n for file in files:\n if file == '.DS_Store':\n continue\n print ('Extracting ' + root + '/' + file)\n os.system('cp ' + root + '/' + file + ' ' + publicdir + '/' + file)\n for dir in dirs:\n if dir == '.git':\n continue\n print ('Extracting Directory ' + root + '/' + dir)\n os.system('cp -r ' + root + '/' + dir + ' ' + publicdir + '/' + dir)\n break\n\ndef compressMoveImg():\n print (\"\\n\\n--------------- Compressing Images ---------------\\n\")\n assetsdir = rootdir + '/' + 'source' + '/' + '_posts' + '/' + 'assets'\n\n compressedimgdir_bak = rootdir + '/' + 'source' + '/' + '_posts' + '/' + 'assets_bak'\n if not os.path.exists(compressedimgdir_bak):\n os.system('mkdir ' + compressedimgdir_bak)\n\n thumbnaildir_bak = rootdir + '/' + 'source' + '/' + '_posts' + '/' + 'thumbnails_bak'\n if not os.path.exists(thumbnaildir_bak):\n os.system('mkdir ' + thumbnaildir_bak)\n\n compressedimgdir = rootdir + '/' + 'public' + '/' + 'assets'\n # os.system('rm -r ' + compressedimgdir)\n print (\"Create Directory \" + compressedimgdir)\n os.system('mkdir ' + compressedimgdir)\n\n thumbnaildir = rootdir + '/' + 'public' + '/' + 'thumbnails'\n # os.system('rm -r ' + thumbnaildir)\n print (\"Create Directory \" + thumbnaildir)\n os.system('mkdir ' + thumbnaildir)\n\n for root, dirs, files in os.walk(assetsdir):\n for file in files:\n if file == '.DS_Store':\n continue\n print ('> Compressing ' + root + '/' + file + ' ...')\n if os.path.exists(thumbnaildir_bak + '/' + file):\n print ('Already Exist, Copy File: ' + file)\n os.system('cp ' + thumbnaildir_bak + '/' + file + ' ' + thumbnaildir + '/' + file)\n os.system('cp ' + compressedimgdir_bak + '/' + file + ' ' + compressedimgdir + '/' + file)\n continue\n\n size = os.path.getsize(root + '/' + file)/float(1024)\n if size < 100:\n print ('Skip Compressing with size: ' + str(size) + \" >> \" + root + '/' + file + ' to assets...')\n os.system('cp ' + assetsdir + '/' + file + ' ' + compressedimgdir + '/' + file)\n os.system('cp ' + assetsdir + '/' + file + ' ' + compressedimgdir_bak + '/' + file)\n os.system('cp ' + assetsdir + '/' + file + ' ' + thumbnaildir + '/' + file)\n os.system('cp ' + assetsdir + '/' + file + ' ' + thumbnaildir_bak + '/' + file)\n continue\n\n source 
= tinify.from_file(root + '/' + file)\n print ('Resizing ' + root + '/' + file + ' to assets...')\n resized = source.resize(method = \"scale\", width = compresswidth)\n resized.to_file(compressedimgdir + '/' + file)\n os.system('cp ' + compressedimgdir + '/' + file + ' ' + compressedimgdir_bak + '/' + file)\n print ('Resizing ' + root + '/' + file + ' to thumbnails...')\n reresize = source.resize(method = \"scale\", width = thumbnailwidth)\n reresize.to_file(thumbnaildir + '/' + file)\n os.system('cp ' + thumbnaildir + '/' + file + ' ' + thumbnaildir_bak + '/' + file)\n print ('Baking File: ' + file)\n break\n\n\ndef MoveToGithubDir():\n print (\"\\n\\n--------------- Moving To Github Directory ---------------\\n\")\n githubdir = rootdir + '/' + 'jinningli.github.io'\n\n for root, dirs, files in os.walk(githubdir):\n for file in files:\n if file == '.DS_Store':\n continue\n print ('Delete ' + root + '/' + file)\n os.system('rm ' + root + '/' + file)\n for dir in dirs:\n if dir == '.git':\n continue\n print ('Delete Directory ' + root + '/' + dir)\n os.system('rm -r ' + root + '/' + dir)\n break\n\n for root, dirs, files in os.walk(rootdir + '/' + 'public'):\n for file in files:\n if file == '.DS_Store':\n continue\n print ('Extracting ' + root + '/' + file)\n os.system('cp ' + root + '/' + file + ' ' + githubdir + '/' + file)\n for dir in dirs:\n if dir == '.git':\n continue\n print ('Extracting Directory ' + root + '/' + dir)\n os.system('cp -r ' + root + '/' + dir + ' ' + githubdir + '/' + dir)\n break\n\n\ndef pushToGithub():\n print (\"\\n\\n--------------- Pushing To Github Directory ---------------\\n\")\n s = raw_input(\"Pushing to jinningli.github.io? (y/n)\\n\")\n if not (s == \"y\" or s == \"Y\"):\n return\n githubdir = rootdir + '/' + 'jinningli.github.io'\n os.chdir(githubdir)\n print('> git add .')\n os.system('git add .')\n print('> git commit -m \\\"Updated, AutoCommit at ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\\"')\n os.system('git commit -m \\\"Updated, AutoCommit at ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\\"')\n print('> git push')\n os.system('git push')\n\n\n\nhexoCalling()\ninsertRealIndex()\ncompressMoveImg()\nMoveToGithubDir()\npushToGithub()\n", "sub_path": "process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 5883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "tinify.key", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.setdefaultencoding", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 24, "usage_type": "call"}, {"api_name": "os.system", "line_number": 29, "usage_type": "call"}, {"api_name": "os.system", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 
47, "usage_type": "call"}, {"api_name": "os.system", "line_number": 52, "usage_type": "call"}, {"api_name": "os.system", "line_number": 57, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 66, "usage_type": "call"}, {"api_name": "os.system", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 73, "usage_type": "call"}, {"api_name": "os.system", "line_number": 74, "usage_type": "call"}, {"api_name": "os.system", "line_number": 75, "usage_type": "call"}, {"api_name": "os.system", "line_number": 76, "usage_type": "call"}, {"api_name": "tinify.from_file", "line_number": 79, "usage_type": "call"}, {"api_name": "os.system", "line_number": 83, "usage_type": "call"}, {"api_name": "os.system", "line_number": 87, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 96, "usage_type": "call"}, {"api_name": "os.system", "line_number": 101, "usage_type": "call"}, {"api_name": "os.system", "line_number": 106, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 109, "usage_type": "call"}, {"api_name": "os.system", "line_number": 114, "usage_type": "call"}, {"api_name": "os.system", "line_number": 119, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 129, "usage_type": "call"}, {"api_name": "os.system", "line_number": 131, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "237144350", "text": "from django import forms\n\n\nclass ContactForm(forms.Form):\n name = forms.CharField(max_length=30)\n email = forms.EmailField(max_length=254)\n phone = forms.CharField()\n\n def clean(self):\n cleaned_data = super(ContactForm, self).clean()\n name = cleaned_data.get('name')\n email = cleaned_data.get('email')\n phone = int(cleaned_data.get('phone'))\n if not name and not email and not phone:\n raise forms.ValidationError('You have to write something!')", "sub_path": "mysite/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "django.forms.Form", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 4, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 5, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 15, "usage_type": "call"}, {"api_name": 
"django.forms", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "390147797", "text": "from models import User\nimport os\nfrom flask import Flask, session, g, render_template, request\nfrom flask.helpers import flash, send_from_directory, url_for\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.utils import redirect\nfrom urllib.parse import urlparse\nfrom flask_bootstrap import Bootstrap\nfrom forms import LoginForm ,UploadForm\n\napp = Flask(__name__)\ndb = SQLAlchemy(app) \ndb.init_app(app)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\nmigrate = Migrate(app,db)\n\n\nbootstrap = Bootstrap(app)\napp.secret_key = 'Very Hard Secret'\napp.config['UPLOAD_PATH'] = os.path.join(app.root_path, 'uploads')\n\n@app.before_request \ndef get_name(): \n g.name = request.args.get('name')\n\n@app.route('/')\ndef index():\n user = session.get('username')\n isUser = User.query.filter_by(username=user).first()\n if isUser is None:\n session['known'] = False\n else:\n session['known'] = True\n return render_template('index.html', user=user, known=session.get('known', False))\n\n\n@app.route('/hello')\ndef hello():\n return '
hello Flask'\n\n@app.route('/user/', defaults={'name': 'pythontxt'})\n@app.route('/user/<name>')\ndef welcome(name):\n    res = 'Hello,%s!' % name\n    if 'loginID' in session:\n        res += 'Authenticated'\n    else:\n        res += 'UnAuthenticated'\n    return res\n\n@app.route('/test/')\ndef test_view():\n    query = 'Flask'\n    if request.args:\n        query = request.args.get('name', 'Flask')\n    host = request.host\n    path = request.full_path\n    cookie = request.cookies\n    method = request.method\n    return \"\"\"\n    \n    query string: %s\n    host: %s\n    path: %s\n    cookies: %s\n    method: %s\n    
\n \"\"\" % (query, host, path, cookie, method)\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n username = form.username.data\n session['username'] = username\n user = User(username = username) #登录页面的用户保存到数据库\n db.session.add(user)\n db.session.commit()\n flash(\"登录成功,%s!\" % username)\n return redirect(url_for('index'))\n return render_template('login.html', form=form)\n\n@app.route('/logout/')\ndef logout():\n if 'loginID' in session:\n session.pop('loginID')\n return redirect(url_for('welcome'))\n\n@app.route('/needlogin1/')\ndef needLogin1():\n if 'loginID' in session:\n user = 'needlogin1'\n return render_template('hello.html', user = user)\n else:\n return render_template('needlogin.html') \n\ndef check_next(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(target)\n return ref_url.netloc == test_url.netloc\n\n#图片保存加载\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n form = UploadForm()\n if form.validate_on_submit():\n f = form.photo.data\n filename = f.filename\n f.save(os.path.join(app.config['UPLOAD_PATH'], filename))\n flash('上传图片文件成功!')\n session['filename'] = filename \n return redirect(url_for('show_images'))\n return render_template('upload.html', form=form)\n\n@app.route('/uploads/')\ndef get_file(filename):\n return send_from_directory(app.config['UPLOAD_PATH'], filename)\n\n@app.route('/uploaded-images')\ndef show_images():\n return render_template('uploaded.html') \n\n\nif __name__ == '__main__':\n app.run(debug = True)", "sub_path": "HelloFlask/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask_migrate.Migrate", "line_number": 20, "usage_type": "call"}, {"api_name": "flask_bootstrap.Bootstrap", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.g.name", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 33, "usage_type": "name"}, {"api_name": "models.User.query.filter_by", "line_number": 34, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.render_template", 
"line_number": 39, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.host", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.full_path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.cookies", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "forms.LoginForm", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "name"}, {"api_name": "models.User", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.helpers.flash", "line_number": 85, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 92, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.host_url", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 105, "usage_type": "call"}, {"api_name": "forms.UploadForm", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.helpers.flash", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 117, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.helpers.url_for", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.helpers.send_from_directory", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 127, "usage_type": "call"}]} +{"seq_id": 
"439270345", "text": "import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Koopa(Sprite):\n def __init__(self, screen, settings, rect, imglib, soundlib):\n super(Koopa, self).__init__()\n self.map = map\n self.screen = screen\n self.speed_factor = 1 # -1 left +1 right\n self.settings = settings\n self.soundlib = soundlib\n self.lr = 0\n # self.y_mod = 200\n self.dead = False\n self.dead_counter = 0\n self.direction_left = True\n\n self.images = []\n self.images.append(imglib[0])\n self.images.append(imglib[1])\n self.images.append(pygame.transform.flip(imglib[0], True, False))\n self.images.append(pygame.transform.flip(imglib[1], True, False))\n self.images.append(imglib[2])\n self.images.append(imglib[3])\n self.image = self.images[0]\n self.rect = rect\n self.screen_rect = screen.get_rect()\n\n self.rect.x = rect.x\n self.rect.y = rect.y - 28\n\n # Store a decimal value for the ship's center\n self.center_x = float(self.rect.x)\n self.center_y = float(self.rect.y)\n\n # Movement flag\n self.moving_right = False\n self.moving_left = True\n # self.moving_up = False\n # self.moving_down = False\n\n def update(self, mario):\n if mario.rect.x >= self.settings.screenWidth / 2 and mario.vector.x_velocity > 0:\n self.center_x -= mario.vector.x_velocity\n if self.dead:\n self.image = self.images[5]\n if self.dead_counter == 600:\n self.center_y -= 28\n self.speed_factor = 1\n self.dead_counter = 0\n self.dead = False\n elif self.dead_counter == 0:\n # self.soundlib[1][10].play()\n self.center_y += 28\n self.speed_factor = 3\n self.dead_counter += 1\n else:\n self.dead_counter += 1\n if self.moving_right:\n self.center_x += self.speed_factor\n elif self.moving_left:\n self.center_x -= self.speed_factor\n if self.rect.left < -150:\n return True\n\n # Update rect object from self.center\n self.rect.x = self.center_x\n self.rect.y = self.center_y\n return False\n\n def flip_img(self):\n if self.dead:\n pass\n elif self.direction_left:\n self.lr = 2\n else:\n self.lr = 0\n self.direction_left = not self.direction_left\n\n def walk_flip(self):\n if self.dead:\n pass\n elif self.image == self.images[0 + self.lr]:\n self.image = self.images[1 + self.lr]\n else:\n self.image = self.images[0 + self.lr]\n\n def blitme(self):\n # self.screen.blit(self.image, self.rect)\n if self.rect.right < 0 or self.rect.left > self.settings.screenWidth:\n return\n self.screen.blit(self.image, self.rect)\n\n def change_direction(self):\n self.moving_left = not self.moving_left\n self.moving_right = not self.moving_right\n self.flip_img()\n", "sub_path": "koopa.py", "file_name": "koopa.py", "file_ext": "py", "file_size_in_byte": 3136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.transform.flip", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "600567423", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nsignatures = []\n\ndb = open('extensions.txt', 'w')\n\nfor i in range(1, 19):\n url = 'https://filesignatures.net/index.php?page=all&order=EXT&alpha=¤tpage=%d' % i\n\n webpage = urlopen(url)\n\n source = BeautifulSoup(webpage, 'html5lib').find('table', {'id': 
'innerTable'}).find('tbody')\n\n for i in source.findChildren('tr'):\n t = list(j.get_text().strip() for j in i.findChildren('td'))\n\n t.pop(0)\n\n if t[0] != 'Extension':\n t[1] = ''.join(t[1].split(' '))\n \n db.write('\\\\'.join(t))\n db.write('\\n')\n\ndb.close()\n", "sub_path": "crawler.py", "file_name": "crawler.py", "file_ext": "py", "file_size_in_byte": 653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "urllib.request.urlopen", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "539950651", "text": "#---------------------#\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom pprint import pprint\nimport os\nimport re\nimport datetime\n#---------------------#\nimport date_message\n#---------------------#\n\ndate_message.reload_today()\n\nscope = [\"https://spreadsheets.google.com/feeds\",'https://www.googleapis.com/auth/spreadsheets',\"https://www.googleapis.com/auth/drive.file\",\"https://www.googleapis.com/auth/drive\"]\n\ntestSheets = ServiceAccountCredentials.from_json_keyfile_name(\"testSheets.json\", scope)\n\nclients = gspread.authorize(testSheets)\nsheet = clients.open(\"2020-06 jjambab\").sheet1\n\ndef write_jjambab():\n with open(\"./jjambab_2020_06.txt\", 'w+t') as file:\n j_data = str(data)\n text = re.sub('[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`\\'…》{}]', '', j_data)\n text = text.replace(\"\\\\n\", \"/\")\n file.write(text)\n return text\n\ndata = sheet.get_all_records()\nj_data = write_jjambab()\n\ndef last_index():\n load_text = j_data.split()\n i = int(len(load_text)/6) + 1\n return i\n\ndef test_reload():\n load_text = j_data.split()\n breakfast = []\n lunch = []\n dinner = []\n\t\n for i in range(1,int(len(load_text)),6):\n breakfast.append(str(load_text[i]))\n for i in range(3,int(len(load_text)),6):\n lunch.append(str(load_text[i]))\n for i in range(5,int(len(load_text)),6):\n dinner.append(str(load_text[i]))\n\n return breakfast,lunch,dinner\n \ndef search_jjambab(result):\n search = int(result)\n search_result = \"단결! \" + str(date_message.todayM) + \"월\" + str(search) + \"일\" + \" 짬밥입니다!!\\n\"\n searchs_result = \"\"\n return search_result, searchs_result\n\ndef result_jjambab(result):\n if result == \"오늘\":\n title = \"단결! \" + str(date_message.todayM) + \"월\" + str(date_message.todayD) + \"일\" + \" 짬밥입니다!!\\n\"\n description = \"\"\n if result == \"내일\":\n title = \"단결! \" + str(date_message.todayM) + \"월\" + str(date_message.tomorrowD) + \"일\" + \" 짬밥입니다!!\\n\"\n description = \"\"\n if result == \"어제\":\n title = \"단결! \" + str(date_message.todayM) + \"월\" + str(date_message.yesterdayD) + \"일\" + \" 짬밥입니다!!\\n\"\n description = \"\"\n if result == \"아침\":\n title = \"단결! 오늘의 아침밥을 불러드렸습니다!\"\n description = \"\"\n if result == \"점심\":\n title = \"단결! 오늘의 점심밥을 불러드렸습니다!\"\n description = \"\"\n if result == \"저녁\":\n title = \"단결! 오늘의 저녁밥을 불러드렸습니다!\"\n description = \"\"\n if result == \"내일아침\":\n title = \"단결! 
내일의 아침밥을 불러드렸습니다!\"\n description = \"\"\n return title, description\n", "sub_path": "jjambab_message.py", "file_name": "jjambab_message.py", "file_ext": "py", "file_size_in_byte": 2798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "date_message.reload_today", "line_number": 12, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 16, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 16, "usage_type": "name"}, {"api_name": "gspread.authorize", "line_number": 18, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "date_message.todayM", "line_number": 54, "usage_type": "attribute"}, {"api_name": "date_message.todayM", "line_number": 60, "usage_type": "attribute"}, {"api_name": "date_message.todayD", "line_number": 60, "usage_type": "attribute"}, {"api_name": "date_message.todayM", "line_number": 63, "usage_type": "attribute"}, {"api_name": "date_message.tomorrowD", "line_number": 63, "usage_type": "attribute"}, {"api_name": "date_message.todayM", "line_number": 66, "usage_type": "attribute"}, {"api_name": "date_message.yesterdayD", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "240594860", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 11:22:04 2016\n\n@author: Aaron Shortt\nTitle: Data quality Report \n\nInput: FeatureNames.txt\n DataSet.txt\n \nOutput: studentNumberCONT.csv\n \n\n\"\"\"\n\nimport pandas as pd\nimport plotly.offline as py\nfrom plotly import tools\nimport csv\nimport numpy as np\nfrom collections import Counter\nfrom plotly.tools import FigureFactory as FF\nimport plotly.graph_objs as go\n\n\n\ncolumnPath = '.\\data\\\\featurenames.txt'\ndataPath = '.\\data\\DataSet.txt'\n\n#Read in column names\ncolumns = list()\nfor line in open(columnPath):\n line = line.replace('\\n','')\n columns.append(line)\ncolumns.pop();\n\n#Read in csv\nunknown = ['?',' ','NaN']\ndf = pd.read_csv(dataPath, names=columns, na_values = unknown)\n\ncontHeaders = ['FEATURENAME','Count', '% Miss', 'Cardinality', 'Min', '1st Qrt', 'Mean', 'Median', '3rd Qrt', 'Max', 'Std Deviation']\ncatHeaders = ['FEATURENAME','Count', '% Miss', 'Cardinality', 'Mode', 'Mode Frequency', 'Mode %', '2nd Mode', '2nd Mode Frequency', '2nd Mode %']\n\ncontRows = list()\ncatRows = list()\n\ncont = [1,3,5,11,12,13]\ncat = [2,4,6,7,8,9,10,14,15]\n\nfor x in cont:\n contRows.append(\n [columns[x].upper(),\n df[columns[x]].count(),\n round(((df[columns[x]].isnull().sum().sum() / df[columns[x]].count()) * 100),4),\n len(df[columns[x]].unique()),\n df[columns[x]].min(),\n round(df[columns[x]].quantile(.25), 4),\n round(df[columns[x]].mean(), 4),\n round(df[columns[x]].median(), 4),\n round(df[columns[x]].quantile(.75), 4),\n df[columns[x]].max(),\n round(df[columns[x]].std(), 4)\n ]\n )\n \nfor x in cat:\n data = Counter(df[columns[x]].tolist())\n modes = data.most_common(2)\n catRows.append(\n [columns[x].upper(),\n df[columns[x]].count(),\n round(((df[columns[x]].isnull().sum().sum() / df[columns[x]].count()) * 100),4),\n len(df[columns[x]].unique()),\n modes[0][0],\n modes[0][1],\n round((modes[0][1] / df[columns[x]].count() * 100), 4),#mode percentage,\n modes[1][0],#2nd mode,\n modes[1][1],#2nd mode frequency,\n round((modes[1][1] / df[columns[x]].count() * 100), 4)#2ndmode percentage,\n ]\n )\n \n# Adding Rows to Continious 
Table\ncont_data_matrix = [contHeaders,\n                    contRows[0],\n                    contRows[1],\n                    contRows[2],\n                    contRows[3],\n                    contRows[4],\n                    contRows[5]\n                    ]\n# Adding Rows to Categorical Table\ncat_data_matrix = [catHeaders,\n                   catRows[0],\n                   catRows[1],\n                   catRows[2],\n                   catRows[3],\n                   catRows[4],\n                   catRows[5],\n                   catRows[6],\n                   catRows[7],\n                   catRows[8]\n                   ]\n \n\ncont_table = FF.create_table(cont_data_matrix, index= True,index_title='FeatureName')\ncat_table = FF.create_table(cat_data_matrix, index= True,index_title='FeatureName')\n\n#==============================================================================\n#Comma Separated Output Files\n#==============================================================================\n\ncontCSV = open('.\\data\\C11483622CONT.csv', 'w')\nwr = csv.writer(contCSV, quoting=csv.QUOTE_ALL)\nwr.writerow(contHeaders)\nwr.writerows(contRows)\ncontCSV.flush()\ncontCSV.close()\n\ncatCSV = open('.\\data\\C11483622CAT.csv', 'w')\nwr2 = csv.writer(catCSV, quoting=csv.QUOTE_ALL)\nwr2.writerow(catHeaders)\nwr2.writerows(catRows)\ncatCSV.flush()\ncatCSV.close()\n\n#==============================================================================\n# Graphing using Plotly (if not working, 'Pip install plotly') ... Voila\n# Dist Bar charts\n#==============================================================================\nfig2 = tools.make_subplots(rows=2, cols=2, specs=[[{}, {}], [{'colspan': 2}, None]],\n                          subplot_titles=('First Mode','Second Mode', 'Standard Deviation'))\n\nlm1 = list()\nlm2 = list()\n\nfor x in cat:\n    data = Counter(df[columns[x]].tolist())\n    modes = data.most_common(2)\n    lm1.append([modes[0][0], modes[0][1]])\n    lm2.append([modes[1][0], modes[1][1]])\n\nbar1 = go.Bar(\n    x = [pair[0] for pair in lm1],\n    y = [pair[1] for pair in lm1],\n    name = 'First Modal'\n)\n\nbar2 = go.Bar(\n    x = [pair[0] for pair in lm2],\n    y = [pair[1] for pair in lm2],\n    name = 'Second Modal'\n)\n\nfig2.append_trace(bar1,1,1)\nfig2.append_trace(bar2,1,2)\n\n\n\n#Deviation\nl = list()\nu = list()\nm = list()\nfor x in cont:\n    l.append(round(df[columns[x]].std(),4))\n    u.append(columns[x])\n    m.append(round(df[columns[x]].mean(),4))\n\nline = go.Scatter(\n    y = m,\n    x = u,\n    error_y= dict(\n        type='data',\n        array=l,\n        visible=True\n    ),\n    name = 'Standard Deviation'\n)\n\nfig2.append_trace(line,2,1)\n\n\n# Generate the data tables and the figure as offline HTML plots.
\npy.plot(cont_table, filename='C11483622CONT.html')\npy.plot(cat_table, filename='C11483622CAT.html')\npy.plot(fig2)\n\n\n", "sub_path": "c11483622.py", "file_name": "c11483622.py", "file_ext": "py", "file_size_in_byte": 5081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.tools.FigureFactory.create_table", "line_number": 106, "usage_type": "call"}, {"api_name": "plotly.tools.FigureFactory", "line_number": 106, "usage_type": "name"}, {"api_name": "plotly.tools.FigureFactory.create_table", "line_number": 107, "usage_type": "call"}, {"api_name": "plotly.tools.FigureFactory", "line_number": 107, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 114, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 114, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 121, "usage_type": "call"}, {"api_name": "csv.QUOTE_ALL", "line_number": 121, "usage_type": "attribute"}, {"api_name": "plotly.tools.make_subplots", "line_number": 131, "usage_type": "call"}, {"api_name": "plotly.tools", "line_number": 131, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 138, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 143, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 143, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 149, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 169, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 169, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 184, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 184, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 185, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 185, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 186, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "60581878", "text": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport os\nfrom knack.log import get_logger\n\n\ndef storage_file_upload(client, local_file_path, content_settings=None,\n                        metadata=None, validate_content=False, progress_callback=None, max_connections=2, timeout=None):\n\n    upload_args = {\n        'content_settings': content_settings,\n        'metadata': metadata,\n        'validate_content': validate_content,\n        'max_concurrency': max_connections,\n        'timeout': timeout\n    }\n\n    if progress_callback:\n        upload_args['raw_response_hook'] = progress_callback\n\n    # Because the contents of the uploaded file may be too large, they are passed in as a stream object;\n    # upload_file() reads the file data in batches to avoid OOM problems\n    count = os.path.getsize(local_file_path)\n    with open(local_file_path, 'rb') as stream:\n        response = client.upload_file(data=stream, length=count, **upload_args)\n\n    return response\n\n\ndef storage_file_upload_batch(cmd, client, destination, source, destination_path=None, pattern=None, dryrun=False,\n                              validate_content=False, content_settings=None, max_connections=1, metadata=None,\n                              progress_callback=None):\n    \"\"\" Upload local files to Azure Storage File Share in batch \"\"\"\n\n    from ..util import glob_files_locally, normalize_blob_file_path, guess_content_type\n    from ..track2_util import make_file_url\n\n    source_files = [c for c in glob_files_locally(source, pattern)]\n    logger = get_logger(__name__)\n    settings_class = cmd.get_models('_models#ContentSettings')\n\n    if dryrun:\n        logger.info('upload files to file share')\n        logger.info('    account %s', client.account_name)\n        logger.info('      share %s', destination)\n        logger.info('      total %d', len(source_files))\n        return [{'File': make_file_url(client, os.path.dirname(dst) or None, os.path.basename(dst)),\n                 'Type': guess_content_type(src, content_settings, settings_class).content_type} for src, dst in\n                source_files]\n\n    # TODO: Performance improvement\n    # 1. Upload files in parallel\n    def _upload_action(src, dst):\n        dst = normalize_blob_file_path(destination_path, dst)\n        dir_name = os.path.dirname(dst)\n        file_name = os.path.basename(dst)\n\n        _make_directory_in_files_share(client, dir_name)\n\n        logger.warning('uploading %s', src)\n\n        storage_file_upload(client.get_file_client(dst), src, content_settings, metadata, validate_content,\n                            progress_callback, max_connections)\n\n        return make_file_url(client, dir_name, file_name)\n\n    return list(_upload_action(src, dst) for src, dst in source_files)\n\n\ndef _make_directory_in_files_share(share_client, directory_path, existing_dirs=None):\n    \"\"\"\n    Create directories recursively.\n    This method accepts an existing_dirs set which serves as a cache of existing directories. If the 
If the\n parameter is given, the method will search the set first to avoid repeatedly create directory\n which already exists.\n \"\"\"\n from azure.common import AzureHttpError\n from azure.core.exceptions import ResourceExistsError\n\n if not directory_path:\n return\n\n parents = [directory_path]\n p = os.path.dirname(directory_path)\n while p:\n parents.append(p)\n p = os.path.dirname(p)\n\n for dir_name in reversed(parents):\n if existing_dirs and (dir_name in existing_dirs):\n continue\n\n try:\n share_client.get_directory_client(directory_path=dir_name).create_directory()\n except ResourceExistsError:\n pass\n except AzureHttpError:\n from knack.util import CLIError\n raise CLIError('Failed to create directory {}'.format(dir_name))\n\n if existing_dirs:\n existing_dirs.add(directory_path)\n", "sub_path": "src/storage-preview/azext_storage_preview/operations/file.py", "file_name": "file.py", "file_ext": "py", "file_size_in_byte": 4200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "os.path.getsize", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "util.glob_files_locally", "line_number": 40, "usage_type": "call"}, {"api_name": "knack.log.get_logger", "line_number": 41, "usage_type": "call"}, {"api_name": "track2_util.make_file_url", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 49, "usage_type": "call"}, {"api_name": "util.guess_content_type", "line_number": 50, "usage_type": "call"}, {"api_name": "util.normalize_blob_file_path", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "track2_util.make_file_url", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "azure.core.exceptions.ResourceExistsError", "line_number": 97, "usage_type": "name"}, {"api_name": "azure.common.AzureHttpError", "line_number": 99, "usage_type": "name"}, {"api_name": "knack.util.CLIError", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "433421921", "text": "import gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Categorical\nimport random\nfrom collections import deque\n\n# Hyperparameters\n# EPISODES = 10000\nLEARNING_RATE = 0.0005\nDISCOUNT_FACTOR = 0.98\nT_GLOBAL_MAX = 10000\nT_TREAD_MAX = 100\n\n\nclass Network(nn.Module):\n \"\"\"\n copied from asynchronous-ppo.py\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(4, 128)\n self.fc2 = nn.Linear(128, 128)\n self.p = nn.Linear(128, 2)\n self.value = nn.Linear(128, 1)\n\n def pi(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n prob = F.softmax(self.p(x), dim=1)\n return prob\n\n def v(self, x):\n x = 
F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return self.value(x)\n\n\ndef initilize():\n \"\"\"\"\"\"\n\n\ndef simulate(obs, env):\n \"\"\"\"\"\"\n samples, score, step = [], 0.0, 0\n while (done is not True) or (t - t_start < T_TREAD_MAX):\n # Perform a_t according to policy pi\n prob = net.pi(torch.tensor(obs).unsqueeze(0).float())\n prob_ = Categorical(prob)\n action = prob_.sample().item()\n\n # Receive reward r_t and new state s_t+1\n next_obs, reward, done, info = env.step(action) # next_obs=s_t+1, reward=r_t\n samples.append((obs, action, prob[0][action], reward / 100.0, next_obs, done))\n t = t + 1\n T = T + 1\n\n return samples\n\n\ndef mini_batch(samples):\n \"\"\"\"\"\"\n obs, acts, rewards, next_obs, done = zip(*samples)\n obs = torch.tensor(obs).float()\n acts = torch.tensor(acts)\n rewards = torch.tensor(rewards).float()\n next_obs = torch.tensor(next_obs).float()\n done = torch.tensor(done).int()\n\n return obs, acts, rewards, next_obs, done\n\n\ndef train(net, samples, optimizer):\n \"\"\"\n 개어렵넹\n \"\"\"\n obs, acts, rewards, next_obs, done = mini_batch(samples)\n if done[-1] is True:\n R = 0\n else:\n R = net.v(obs[-1]) # Bootstrap from last state\n\n for i in reversed(range(len(samples))):\n R = rewards[i] + DISCOUNT_FACTOR * R\n p_loss = torch.log(net.pi(obs[i])) * (R - net.v(obs[i]))\n v_loss = F.mse_loss(net.v(obs[i]), R.detach())\n loss = p_loss + v_loss\n optimizer.zero_grad()\n loss.backward()\n for global_param, local_param in zip(global_net.parameters(), net.parameters()):\n global_param._grad = local_param.grad\n optimizer.step()\n\n\ndef predict():\n \"\"\"\"\"\"\n\n\n# def main():\n# \"\"\"\"\"\"\n\n\nif __name__ == \"__main__\":\n # initialize\n # until converge\n # simulate\n # mini_batch\n # train\n\n # initialize\n env = gym.make(\"CartPole-v1\")\n net = Network()\n optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)\n\n theta_global = 0\n vtheta_global = 0\n theta_thread = 0\n vtheta_thread = 0\n t = 1\n # until converge\n for t_global in range(T_GLOBAL_MAX):\n theta_thread = theta_global\n vtheta_thread = vtheta_global\n t_start = t\n if done is True:\n obs = env.reset()\n done = False\n samples = simulate(obs, env)\n\n # train\n train(samples, optimizer)\n", "sub_path": "src/a3c.py", "file_name": "a3c.py", "file_ext": "py", "file_size_in_byte": 3239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 33, 
"usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 89, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "620583775", "text": "import requests, json\r\nimport re\r\nfrom .mparser import Parser\r\n\r\n\r\nclass MLApi():\r\n apiUrl = \"https://api.mercadolibre.com/sites/MLA\"\r\n\r\n @staticmethod\r\n def search(term):\r\n r = requests.get(\r\n MLApi.apiUrl + \"/search\",\r\n params={\"q\": term, \"sort\": \"price_asc\"}\r\n )\r\n r = json.loads(r.text)\r\n parser = Parser(\"mercadolibre\")\r\n out = []\r\n for product in r[\"results\"]:\r\n product[\"currency\"] = product[\"currency_id\"]\r\n product[\"url\"] = product[\"permalink\"]\r\n product[\"image_url\"] = MLApi.get_medium_sized_thumbnail(product[\"thumbnail\"])\r\n product = parser.parse(product)\r\n out.append(product)\r\n\r\n return out\r\n\r\n @staticmethod\r\n def get_medium_sized_thumbnail(thumbnail_url):\r\n p = re.compile(r\"-I\\.jpg$\")\r\n return p.sub(\"-J.jpg\", thumbnail_url)\r\n\r\n\r\n", "sub_path": "back/scrapers/ml.py", "file_name": "ml.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "mparser.Parser", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "230778587", "text": "import bs4\nimport requests\nfrom urllib.request import Request, urlopen\n\nprint(\"\\nscraping jobs at desktopmetal\")\nsite= \"https://boards.greenhouse.io/desktopmetal\"\nhdr = {'User-Agent': 'Mozilla/5.0'}\nreq = Request(site,headers=hdr)\npage = urlopen(req)\nbs = bs4.BeautifulSoup(page,\"html.parser\")\njobs = bs.find_all('div',{'class':'opening'})\n# print(len(jobs))\nopen('desktopmetal-jobs.txt','w').close()\nfile = open('desktopmetal-jobs.txt','a+')\nfor job in jobs:\n link = 'https://boards.greenhouse.io'+job.find('a')['href']\n # print(link)\n descriptionHTML = requests.get(link)\n bs1 = bs4.BeautifulSoup(descriptionHTML.text,\"html.parser\")\n title = bs1.find('h1',{'class':'app-title'}).get_text()\n title = title.split('–')[0]\n 
location = bs1.find('div',{'class':'location'}).get_text().strip(\"\\n\").strip(\" \")\n description = bs1.find('div',{'id':'content'}).get_text()\n # print(title,location)\n file.write(\"Title: \"+title+\"\\nLocation: \"+location+\"\\nDescription: \"+description)\n file.write(\"\\n--------------------------------\\n\")\nfile.close()\nprint(\"desktopmetal jobs scraped successfully\")\n", "sub_path": "desktopmetal/desktopmetal.py", "file_name": "desktopmetal.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "urllib.request.Request", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "477469239", "text": "import math\nimport copy\nimport numpy as np\nimport cv2\nimport sys\n\nfrom lib.img_anno_manage import ImgAnnoManage\n\n\nclass IsolateRects:\n def __init__(self, debug=False, init_show=False):\n\n self.iam = ImgAnnoManage()\n\n self.debug = debug\n self.init_show = init_show\n\n self.thresh_vals = 230 # 220~240\n self.thresh_candi_dis = 0.5\n self.thresh_candi_height = 0.3\n self.thresh_candi_width = 0.5\n\n self.thresh_same_color = 0.17\n\n self.thresh_sz_vertical, self.thresh_sz_horizontal = None, None\n pass\n\n def get_contours(self, img, mask):\n height, width = img.shape[:2]\n self.thresh_sz_horizontal = width // 100\n self.thresh_sz_vertical = height // 100\n\n kernel_h = np.ones((1, self.thresh_sz_horizontal), dtype=int) # horizontal erode for remove the txt charactors ~ font size\n erode_h = cv2.erode(mask, kernel_h)\n\n kernel_v = np.ones((self.thresh_sz_vertical , 1), dtype=int) # padding between bar and bar 1/70 ~ 1/100\n erode_v = cv2.erode(erode_h, kernel_v)\n\n _, contours, hierarchy = cv2.findContours(erode_v, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rects = []\n candi_parents = []\n for c in range(len(contours)):\n [next, previous, first_child, parent] = hierarchy[0][c]\n (x, y, w, h) = cv2.boundingRect(contours[c])\n if parent == -1 or parent in candi_parents:\n rects.append([x, y, w, h])\n # cv2.drawContours(img, contour, -1, (0, 255, 0), 1)\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 1)\n elif parent == -1 and w * h > (height * width * 0.2):\n candi_parents.append(c)\n\n if self.init_show:\n cv2.imshow(\"thresh\", mask)\n cv2.imshow(\"erode_h\", erode_h)\n cv2.imshow(\"erode_v\", erode_v)\n cv2.imshow(\"contour\", cv2.resize(img, (700, int(img.shape[0] * 700 / img.shape[1]))))\n cv2.waitKey(0)\n pass\n return rects\n\n def classify_group(self, rects):\n li_groups = []\n\n while len(rects) > 0:\n cur_rect = rects[0]\n if not self.__candi_size(rect=cur_rect):\n rects.remove(cur_rect)\n continue\n\n group = [cur_rect]\n top_rect = self.top_neighbor_rect(src=cur_rect, rects=rects)\n while top_rect is not None:\n group = [top_rect] + group\n cur_rect = top_rect\n top_rect = self.top_neighbor_rect(src=cur_rect, rects=rects)\n\n cur_rect = group[-1]\n bottom_rect = self.bottom_neigbor_rect(src=cur_rect, rects=rects)\n while bottom_rect is not None:\n group = group + [bottom_rect]\n cur_rect = bottom_rect\n bottom_rect = self.bottom_neigbor_rect(src=cur_rect, rects=rects)\n\n li_groups.append(group)\n for g in group:\n rects.remove(g)\n\n return 
li_groups\n\n def __candi_height(self, bottom, top):\n [x1, y1, w1, h1] = top\n [x2, y2, w2, h2] = bottom\n return math.fabs(h2 - h1) < (h1 + h2) * self.thresh_candi_height / 2\n\n def __candi_2x_height(self, r_2h, r_1h):\n [x1, y1, w1, h1] = r_1h\n [x2, y2, w2, h2] = r_2h\n return math.fabs(h2 // 2 - h1) < (h1 + h2 // 2) * self.thresh_candi_height / 2\n\n def __candi_width(self, bottom, top):\n [x1, y1, w1, h1] = top\n [x2, y2, w2, h2] = bottom\n return 0 <= (w2 - w1) < (w1 + w2) * self.thresh_candi_width / 2\n\n def __distance(self, pt1, pt2):\n dis = math.sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)\n return dis\n\n def __candi_size(self, rect):\n [x, y, w, h] = rect\n return w > self.thresh_sz_horizontal and h > self.thresh_sz_vertical\n\n def __candi_distance(self, bottom, top):\n [x1, y1, w1, h1] = top\n _pt1 = (x1, y1 + h1)\n\n [x2, y2, w2, h2] = bottom\n _pt2 = (x2, y2)\n\n dis = self.__distance(pt1=_pt1, pt2=_pt2)\n\n if _pt1[1] < _pt2[1] and dis < self.thresh_candi_dis * (h1 + h2) / 2:\n return dis\n else:\n return None\n\n #\n def top_neighbor_rect(self, src, rects):\n # [x1, y1, w1, h1] = src\n min_dis = 0\n min_rect = None\n i = 0\n while i < len(rects):\n rect = rects[i]\n if rect == src:\n i += 1\n continue\n\n if self.__candi_width(bottom=src, top=rect):\n dis = self.__candi_distance(bottom=src, top=rect)\n if dis is not None: # candidate distance\n if self.__candi_height(bottom=src, top=rect):\n if min_rect is None or dis < min_dis:\n min_dis = dis\n min_rect = rect\n elif self.__candi_2x_height(r_2h=rect, r_1h=src):\n [x2, y2, w2, h2] = rect\n sp_t_rect = [x2, y2, w2-1, h2 // 2 - 1]\n sp_b_rect = [x2, y2 + h2 // 2 + 1, w2, h2 // 2]\n rects.remove(rect)\n rects.append(sp_t_rect)\n rects.append(sp_b_rect)\n continue\n i += 1\n return min_rect\n\n #\n def bottom_neigbor_rect(self, src, rects):\n min_dis = 0\n min_rect = None\n i = 0\n while i < len(rects):\n rect = rects[i]\n if rect == src:\n i += 1\n continue\n if self.__candi_width(bottom=rect, top=src):\n dis = self.__candi_distance(bottom=rect, top=src)\n if dis is not None:\n if self.__candi_height(bottom=rect, top=src):\n if min_rect is None or dis < min_dis:\n min_dis = dis\n min_rect = rect\n elif self.__candi_2x_height(r_2h=rect, r_1h=src):\n [x2, y2, w2, h2] = rect\n sp_t_rect = [x2, y2, w2, h2 // 2 - 1]\n sp_b_rect = [x2, y2 + h2 // 2 + 1, w2, h2 // 2]\n rects.remove(rect)\n rects.append(sp_t_rect)\n rects.append(sp_b_rect)\n continue\n i += 1\n return min_rect\n\n def split_img(self, img, binary, groups):\n img_h, img_w = img.shape[:2]\n\n # count the number of groups\n sub_parts = []\n i = 0\n while i < len(groups):\n if len(groups[i]) >= 5:\n sub_parts.append(groups[i])\n del groups[i]\n else:\n i += 1\n\n # calculate the split coordinate\n split_x, split_y = 0, 0\n if len(sub_parts) == 1:\n split_x, split_y = 0, 0\n elif len(sub_parts) == 2:\n corners = []\n for sub_part in sub_parts:\n np_rects = np.array(sub_part)\n min_x = np.amin(np_rects, axis=0)[0]\n min_y = np.amin(np_rects, axis=0)[1]\n corners.append([min_x, min_y])\n\n if math.fabs(corners[0][0] - corners[1][0]) > math.fabs(corners[0][1] - corners[1][1]):\n # horizonal split\n split_x = min(img_w // 2, max(corners[0][0], corners[1][0]))\n else:\n # vertical split\n split_y = min(img_h // 2, max(corners[0][1], corners[1][1]))\n else:\n sys.stderr.write(\"unknown structure {}\\n\".format(len(sub_parts)))\n return\n # sys.exit(0)\n\n # split images and calibrate the coordinates of the ract\n splits = []\n if split_x == 0 and split_y == 0:\n 
rects = []\n for group in groups:\n rects.extend(group)\n splits = [{'img': img,\n 'bin': binary,\n 'rects': rects,\n 'bars': sub_parts[0]}]\n else:\n if split_x != 0:\n img_left = img[:, :split_x]\n bin_left = binary[:, :split_x]\n img_right = img[:, split_x:]\n bin_right = binary[:, split_x:]\n\n if sub_parts[0][0] < sub_parts[1][0]:\n group_left = sub_parts[0]\n group_right = sub_parts[1]\n else:\n group_left = sub_parts[1]\n group_right = sub_parts[0]\n for r in range(len(group_right)):\n [x, y, w, h] = group_right[r]\n group_right[r] = [x-split_x, y, w, h]\n\n rects_left, rects_right = [], []\n for group in groups:\n for rect in group:\n [x, y, w, h] = rect\n if x < split_x:\n rects_left.append(rect)\n else:\n rects_right.append([x - split_x, y, w, h])\n splits = [{'img': img_left,\n 'bin': bin_left,\n 'rects': rects_left,\n 'bars': group_left},\n {'img': img_right,\n 'bin': bin_right,\n 'rects': rects_right,\n 'bars': group_right}\n ]\n if split_y != 0:\n img_top = img[:split_y, :]\n bin_top = binary[:split_y, :]\n img_bottom = img[split_y:, :]\n bin_bottom = binary[split_y:, :]\n\n if sub_parts[0][1] < sub_parts[1][1]:\n group_top = sub_parts[0]\n group_bottom = sub_parts[1]\n else:\n group_top = sub_parts[1]\n group_bottom = sub_parts[0]\n for r in range(len(group_bottom)):\n [x, y, w, h] = group_bottom[r]\n group_bottom[r] = [x, y - split_y, w, h]\n\n rects_top, rects_bottom = [], []\n for group in groups:\n for rect in group:\n [x, y, w, h] = rect\n if y < split_y:\n rects_top.append(rect)\n else:\n rects_bottom.append([x, y - split_y, w, h])\n splits = [{'img': img_top,\n 'bin': bin_top,\n 'rects': rects_top,\n 'bars': group_top},\n {'img': img_bottom,\n 'bin': bin_bottom,\n 'rects': rects_bottom,\n 'bars': group_bottom}\n ]\n if self.debug and False:\n for sp in splits:\n show_img = sp['img'].copy()\n rects = sp['rects']\n bars = sp['bars']\n for (x, y, w, h) in rects:\n cv2.rectangle(show_img, (int(x), int(y)), (int(x + w), int(y + h)), (0, 0, 255), -1)\n for (x, y, w, h) in bars:\n cv2.rectangle(show_img, (int(x), int(y)), (int(x + w), int(y + h)), (255, 0, 0), -1)\n cv2.imshow(\"sp\", cv2.resize(show_img, (700, show_img.shape[0] * 700 // show_img.shape[1])))\n cv2.waitKey(0)\n return splits\n\n def __get_rect_color(self, img, rect):\n hist_rods = 16\n\n [x, y, w, h] = rect\n crop = img[y:y + h, x:x + w]\n\n hist_b = cv2.calcHist([crop], [0], None, [hist_rods], [0, 256])\n hist_g = cv2.calcHist([crop], [1], None, [hist_rods], [0, 256])\n hist_r = cv2.calcHist([crop], [2], None, [hist_rods], [0, 256])\n\n b = np.argmax(hist_b) * 256.0 / hist_rods + 0.5 * 256.5 / hist_rods\n g = np.argmax(hist_g) * 256.0 / hist_rods + 0.5 * 256.5 / hist_rods\n r = np.argmax(hist_r) * 256.0 / hist_rods + 0.5 * 256.5 / hist_rods\n\n return [r, g, b]\n\n def __compare_colors(self, color1, color2):\n [r1, g1, b1] = color1\n [r2, g2, b2] = color2\n dis = math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)\n len1 = math.sqrt(r1**2 + g1**2 + b1**2)\n len2 = math.sqrt(r2**2 + g2**2 + b2**2)\n return dis / ((len1 + len2) / 2)\n\n def __same_level(self, rect1, rect2):\n margin = 0.2\n x1, y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n return y1 - margin * h1 < y2 + h2 // 2 < y1 + h1 + margin * h1\n\n def isolate_value_rect(self, splits):\n if splits is None:\n return\n for sp in splits:\n img = sp['img']\n rects = sp['rects']\n bars = sp['bars']\n thresh_left_pos = bars[-1][0] + bars[-1][2]\n\n bar_colors = []\n for bar in bars:\n bar_colors.append(self.__get_rect_color(img=img, rect=bar))\n\n rect_colors = 
[]\n for rect in rects:\n rect_colors.append(self.__get_rect_color(img=img, rect=rect))\n\n value_rects = []\n for i in range(len(rects)):\n min_dis = None\n min_level = None\n for j in range(len(bars)):\n if self.__same_level(rect1=bars[j], rect2=rects[i]) and \\\n self.__candi_height(bottom=bars[j], top=rects[i]) and \\\n rects[i][0] > thresh_left_pos:\n color_dis = self.__compare_colors(color1=bar_colors[j], color2=rect_colors[i])\n if min_dis is None or min_dis > color_dis:\n min_dis = color_dis\n min_level = j\n min_rect = rects[i]\n\n if min_dis is not None and min_dis < self.thresh_same_color:\n value_rects.append({'level': min_level,\n 'rect': min_rect})\n\n sp['value_rects'] = value_rects\n if self.debug:\n for sp in splits:\n show_img = sp['img'].copy()\n binary = sp['bin'].copy()\n value_rects = sp['value_rects']\n bars = sp['bars']\n for value_rect in value_rects:\n (x, y, w, h) = value_rect['rect']\n cv2.rectangle(show_img, (int(x), int(y)), (int(x + w), int(y + h)), (0, 0, 255), 2)\n for (x, y, w, h) in bars:\n cv2.rectangle(show_img, (int(x), int(y)), (int(x + w), int(y + h)), (255, 0, 0), -1)\n cv2.imshow(\"sp\", cv2.resize(show_img, (700, show_img.shape[0] * 700 // show_img.shape[1])))\n cv2.imshow(\"bin\", cv2.resize(binary, (700, show_img.shape[0] * 700 // show_img.shape[1])))\n cv2.waitKey(0)\n\n def isolate(self, img):\n img, mask, binary = self.iam.cvt_to_binary(img=img)\n\n rects = self.get_contours(img=img.copy(), mask=mask.copy())\n groups = self.classify_group(rects=rects)\n\n splits = self.split_img(img=img.copy(), binary=mask.copy(), groups=groups)\n self.isolate_value_rect(splits=splits)\n\n return splits\n\nif __name__ == '__main__':\n ir = IsolateRects(debug=True, init_show=True)\n\n # img = cv2.imread(\"../data/10_SOL170187_IMG_60_0000.JPG\")\n # img = cv2.imread(\"../data/29220_26423927_EPC_01_0000.jpg\")\n # img = cv2.imread(\"../data/9582_BAL102692_EPCGRAPH_01_0000.jpg\")\n # img = cv2.imread(\"../data/9487_3134585_EPCGRAPH_01_0000.jpg\")\n path = \"../data/images/151058_101697000116_EPCGRAPH_01_0000.jpg\"\n img = cv2.imread(path)\n ir.isolate(img=img)\n", "sub_path": "lib/isolate_rects.py", "file_name": "isolate_rects.py", "file_ext": "py", "file_size_in_byte": 15773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "lib.img_anno_manage.ImgAnnoManage", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 57, "usage_type": "call"}, {"api_name": "math.fabs", 
"line_number": 93, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 98, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 207, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 210, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 217, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 217, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 304, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 306, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 307, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 307, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 308, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 317, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 318, "usage_type": "call"}, {"api_name": "cv2.calcHist", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 323, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 330, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 331, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 332, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 385, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 387, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 388, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 388, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 390, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 411, "usage_type": "call"}]} +{"seq_id": "94264306", "text": "from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import StringProperty\nfrom kivy.uix.image import Image\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivy.graphics.texture import Texture\n\nimport cv2\nimport numpy as np\nimport pyramids\nimport pywt\nimport heartrate\nimport processing\n\nfrom kivy.clock import Clock\n\n(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n__version__ = \"1.0.0\"\n\n\ndef getROI(self, frame):\n classifier_face = cv2.CascadeClassifier(\"haarcascade_frontalface_alt0.xml\")\n img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n faceRects_face = classifier_face.detectMultiScale(img, 1.2, 2, cv2.CASCADE_SCALE_IMAGE, (20, 20))\n if len(faceRects_face) > 0:\n print(\"ROI SUCESS\")\n # 检测到人脸\n # for faceRect_face in faceRects_face:\n x, y, w, h = faceRects_face[0]\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n # 获取图像x起点,y起点,宽,高\n boxh = int(float(h / 2.1))\n xmin = int(x + w / 6)\n ymin = int(y)\n boxw = int(w * 4 / 6)\n\n cv2.rectangle(frame, (xmin, ymin + boxh), (xmin + boxw, ymin + boxh + int(h / 5)), (0, 255, 0), 2)\n bbox = (int(xmin), int(ymin + boxh), int(boxw), int(h / 6))\n buf1 = cv2.flip(frame, 0)\n buf = buf1.tostring()\n texture1 = 
Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.texture = texture1\n return bbox, True\n else:\n print(\"ROI Failed\")\n buf1 = cv2.flip(frame, 0)\n buf = buf1.tostring()\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.texture = texture1\n return (), False\n\n\ndef Tracking(out):\n # Frequency range for Fast-Fourier Transform\n freq_min = 0.8\n freq_max = 1.8\n # Build Laplacian video py ramid\n fps = 30\n frame_ct = len(out)\n lap_video = pyramids.build_video_pyramid(out)\n for i, video in enumerate(lap_video):\n if i == 0 or i == len(lap_video) - 1:\n continue\n # Eulerian magnification with temporal FFT filtering\n result, fft, frequencies = processing.fft_filter(video, freq_min, freq_max, fps)\n lap_video[i] += result\n\n # Collapse laplacian pyramid to generate final video\n\n amplified_frames = pyramids.collapse_laplacian_video_pyramid(lap_video, frame_ct)\n\n for frame in amplified_frames:\n cv2.imshow(\"frame\", frame)\n cv2.waitKey(20)\n\n average1 = []\n for frame in amplified_frames:\n average1.append(processing.Imeang(frame))\n\n coeffs = pywt.wavedec(average1, 'sym8', level=4) # 4阶小波分解\n yd4 = pywt.waverec(np.multiply(coeffs, [0, 1, 0, 0, 0]).tolist(), 'sym8')\n yd3 = pywt.waverec(np.multiply(coeffs, [0, 0, 1, 0, 0]).tolist(), 'sym8')\n CD = [yd3, yd4]\n final = pywt.waverec(CD, 'sym8')\n fft, frequencies = processing.fft_filter1(final, freq_min, freq_max, fps)\n # Calculate heart rate\n heart_rate = heartrate.find_heart_rate(fft, frequencies, freq_min, freq_max)\n heart_rate = round(heart_rate, 2)\n return heart_rate\n\n\nclass IndexPage(FloatLayout):\n def __init__(self, **kwargs):\n super(IndexPage, self).__init__(**kwargs)\n\n def page_go(*args):\n App.get_running_app().screen_manager.current = \"Video_page\"\n App.get_running_app().screen_manager.transition.direction = 'left'\n\n\nclass MiddlePage(BoxLayout):\n heartrate = StringProperty()\n\n def __init__(self, **kwargs):\n super(MiddlePage, self).__init__(**kwargs)\n self.heartrate = ''\n\n def show(self):\n heartbeat = Tracking(video)\n self.heartrate = \"heartrate: \" + str(heartbeat) + \" bpm\"\n\n def page_go(*args):\n App.get_running_app().screen_manager.current = \"Index_page\"\n App.get_running_app().screen_manager.transition.direction = 'left'\n\n\nclass KivyCamera(Image):\n def __init__(self, **kwargs):\n super(KivyCamera, self).__init__(**kwargs)\n self.capture = None\n self.clock_event = None\n\n def start(self, capture, fps=30):\n global video\n video = []\n self.video = []\n self.capture = capture\n self.count = 0\n global tracker\n tracker = cv2.TrackerMIL_create()\n\n self.clock_event = Clock.schedule_interval(self.update, 1.0 / fps)\n self.clock_event1 = Clock.schedule_interval(self.update1, 1.0 / fps)\n video = self.video\n\n def update(self, dt):\n ok, frame = self.capture.read()\n if ok:\n if self.count == 0:\n self.bbox, judge = getROI(self, frame)\n if self.bbox != ():\n tracker.init(frame, self.bbox)\n self.count = 1\n else:\n self.count = 0\n\n if self.count == 1:\n Clock.unschedule(self.clock_event)\n\n def update1(self, dt):\n ret, frame = self.capture.read()\n if ret:\n area1 = []\n # Update tracker\n ok, bbox = tracker.update(frame)\n if ok:\n self.count = self.count + 1\n # Draw bonding box\n p1 = (int(bbox[0]), int(bbox[1]))\n p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n area = 
frame[int(bbox[1]):int(bbox[1] + bbox[3]), int(bbox[0]):int(bbox[0] + bbox[2])]\n cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n area = cv2.resize(area, (250, 250))\n area1 = np.ndarray(shape=area.shape, dtype=\"float\")\n area1[:] = area * (1. / 255)\n self.video.append(area1)\n buf1 = cv2.flip(frame, 0)\n buf = buf1.tostring()\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.texture = texture1\n self.bbox = bbox\n else:\n print(\"error\")\n self.count = self.count + 1\n # Draw bonding box\n p1 = (int(self.bbox[0]), int(self.bbox[1]))\n p2 = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))\n area = frame[int(self.bbox[1]):int(self.bbox[1] + self.bbox[3]),\n int(self.bbox[0]):int(self.bbox[0] + self.bbox[2])]\n cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)\n area = cv2.resize(area, (250, 250))\n area1 = np.ndarray(shape=area.shape, dtype=\"float\")\n area1[:] = area * (1. / 255)\n self.video.append(area1)\n buf1 = cv2.flip(frame, 0)\n buf = buf1.tostring()\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n self.texture = texture1\n\n if self.count == 600:\n self.back_index_1()\n self.stop()\n else:\n self.video = []\n self.count = 1\n\n def stop(self):\n Clock.unschedule(self.clock_event1)\n self.capture.release()\n\n def back_index_1(*args):\n App.get_running_app().screen_manager.current = \"Middle_Page\"\n App.get_running_app().screen_manager.transition.direction = 'left'\n\n\nclass VideoPage(FloatLayout):\n word = StringProperty()\n\n def __init__(self, **kwargs):\n super(VideoPage, self).__init__(**kwargs)\n self.word = \"The page is loading , please waiting...\"\n\n def dostart(self, *largs):\n self.word = \"\"\n self.capture = cv2.VideoCapture(0)\n self.ids.cv2cam_l.start(self.capture)\n\n\nclass heartDetectApp(App):\n def build(self):\n self.icon = \"./static/icon.ico\"\n self.title = \"光电容积脉搏波描记法测心率App\"\n self.load_kv(\"./index.kv\")\n self.load_kv(\"./middle.kv\")\n self.load_kv(\"./video.kv\")\n self.screen_manager = ScreenManager()\n pages = {\"Index_page\": IndexPage(), \"Video_page\": VideoPage(), \"Middle_Page\": MiddlePage()}\n for item, page in pages.items():\n self.default_page = page\n # 添加页面\n screen = Screen(name=item)\n screen.add_widget(self.default_page)\n # 向屏幕管理器添加页面\n self.screen_manager.add_widget(screen)\n return self.screen_manager\n\n\nif __name__ == \"__main__\":\n heartDetectApp().run()\n", "sub_path": "PycharmProjects/非接触式测量心率/test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 8762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cv2.__version__.split", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.__version__", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 40, "usage_type": "call"}, {"api_name": 
"kivy.graphics.texture.Texture.create", "line_number": 42, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture", "line_number": 42, "usage_type": "name"}, {"api_name": "cv2.flip", "line_number": 48, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture.create", "line_number": 50, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture", "line_number": 50, "usage_type": "name"}, {"api_name": "pyramids.build_video_pyramid", "line_number": 63, "usage_type": "call"}, {"api_name": "processing.fft_filter", "line_number": 68, "usage_type": "call"}, {"api_name": "pyramids.collapse_laplacian_video_pyramid", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 77, "usage_type": "call"}, {"api_name": "processing.Imeang", "line_number": 81, "usage_type": "call"}, {"api_name": "pywt.wavedec", "line_number": 83, "usage_type": "call"}, {"api_name": "pywt.waverec", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 84, "usage_type": "call"}, {"api_name": "pywt.waverec", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 85, "usage_type": "call"}, {"api_name": "pywt.waverec", "line_number": 87, "usage_type": "call"}, {"api_name": "processing.fft_filter1", "line_number": 88, "usage_type": "call"}, {"api_name": "heartrate.find_heart_rate", "line_number": 90, "usage_type": "call"}, {"api_name": "kivy.uix.floatlayout.FloatLayout", "line_number": 95, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 100, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 100, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 101, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 101, "usage_type": "name"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 104, "usage_type": "name"}, {"api_name": "kivy.properties.StringProperty", "line_number": 105, "usage_type": "call"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 116, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 116, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 117, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 117, "usage_type": "name"}, {"api_name": "kivy.uix.image.Image", "line_number": 120, "usage_type": "name"}, {"api_name": "cv2.TrackerMIL_create", "line_number": 133, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_interval", "line_number": 135, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 135, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_interval", "line_number": 136, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 136, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.unschedule", "line_number": 151, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 151, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 167, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 170, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture.create", "line_number": 172, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture", "line_number": 172, "usage_type": "name"}, {"api_name": 
"cv2.rectangle", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 186, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 189, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture.create", "line_number": 191, "usage_type": "call"}, {"api_name": "kivy.graphics.texture.Texture", "line_number": 191, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.unschedule", "line_number": 203, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 203, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 207, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 207, "usage_type": "name"}, {"api_name": "kivy.app.App.get_running_app", "line_number": 208, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 208, "usage_type": "name"}, {"api_name": "kivy.uix.floatlayout.FloatLayout", "line_number": 211, "usage_type": "name"}, {"api_name": "kivy.properties.StringProperty", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 220, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 224, "usage_type": "name"}, {"api_name": "kivy.uix.screenmanager.ScreenManager", "line_number": 231, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "438251366", "text": "import os\nimport json\n\nfrom glob import glob\nimport pandas as pd\nimport nibabel as nib\nfrom shutil import copy, move\n\n\n# define function to copy non deidentified images to sourcedata/,\n# overwriting images in the bids root folder\ndef copy_no_deid(subject_label):\n # images\n path = os.path.join(args.bids_dir, \"sourcedata/bidsonym/sub-%s\"%subject_label)\n outfile = T1_file[T1_file.rfind('/')+1:T1_file.rfind('.nii')]+'_no_deid.nii.gz'\n if os.path.isdir(path) == True:\n copy(T1_file, os.path.join(path, outfile))\n else:\n os.makedirs(path)\n copy(T1_file, os.path.join(path, outfile))\n # meta-data\n path_task_meta = os.path.join(args.bids_dir, \"sourcedata/bidsonym/\")\n path_sub_meta = os.path.join(args.bids_dir, \"sourcedata/bidsonym/sub-%s\" % subject_label)\n list_task_meta_files = glob(os.path.join(args.bids_dir, '*json'))\n list_sub_meta_files = glob(os.path.join(args.bids_dir, subject_label, '*', '*.json'))\n for task_meta_data_file in list_task_meta_files:\n task_out = task_meta_data_file[task_meta_data_file.rfind('/') + 1:task_meta_data_file.rfind('.json')] + '_no_deid.json'\n copy(task_meta_data_file, os.path.join(path_task_meta, task_out))\n for sub_meta_data_file in list_sub_meta_files:\n sub_out = sub_meta_data_file[sub_meta_data_file.rfind('/') + 1:sub_meta_data_file.rfind('.json')] + '_no_deid.json'\n copy(sub_meta_data_file, os.path.join(path_sub_meta, sub_out))\n\n\ndef check_meta_data(bids_path, subject_label, prob_fields):\n # gather all image files\n list_subject_image_files = glob(os.path.join(bids_path, 'sub-' + subject_label, '*', '*nii.gz'))\n # gather all meta data files\n list_task_meta_files = glob(os.path.join(bids_path, '*json'))\n list_sub_meta_files = glob(os.path.join(bids_path, 'sub-' + subject_label, '*', '*.json'))\n list_meta_files = list_task_meta_files + list_sub_meta_files\n # define potentially problematic fields\n prob_fields = prob_fields\n # check image files, output .csv dataframe with found header information and if it might be problematic\n for subject_image_file in 
list_subject_image_files:\n        #load image header\n        header = nib.load(subject_image_file).header\n        #create df with header information\n        keys = []\n        dat = []\n        for key, data in zip(header.keys(), header.values()):\n            keys.append(key)\n            dat.append(data)\n        header_df = pd.DataFrame({'meta_data_field': keys, 'data': dat, 'problematic': 'no'})\n        #loop over df, checking if information might be problematic\n        #(write through .at: assignments to the row copy yielded by iterrows() do not persist)\n        for index, row in header_df.iterrows():\n            if any(i.lower() in row['meta_data_field'] for i in prob_fields):\n                header_df.at[index, 'problematic'] = 'maybe'\n            else:\n                header_df.at[index, 'problematic'] = 'no'\n        #save image specific df to sourcedata\n        header_df.to_csv(os.path.join(bids_path, 'sourcedata/bidsonym', 'sub-%s' % subject_label, subject_image_file[subject_image_file.rfind('/') + 1:subject_image_file.rfind('.nii.gz')] + '_header_info.csv'), index=False)\n\n    # check meta data files, output .csv dataframe with found information and if it might be problematic\n    for meta_file in list_meta_files:\n        #open meta data files and create df that contains the respective information\n        with open(meta_file, 'r') as json_file:\n            meta_data = json.load(json_file)\n        keys = []\n        info = []\n        for key, inf in zip(meta_data.keys(), meta_data.values()):\n            keys.append(key)\n            info.append(inf)\n        json_df = pd.DataFrame({'meta_data_field': keys, 'information': info, 'problematic': 'no'})\n        #loop over df, checking if information might be problematic\n        for index, row in json_df.iterrows():\n            if any(i in row['meta_data_field'] for i in prob_fields):\n                json_df.at[index, 'problematic'] = 'maybe'\n            else:\n                json_df.at[index, 'problematic'] = 'no'\n        #save json specific df to sourcedata\n        json_df.to_csv(os.path.join(bids_path, 'sourcedata/bidsonym', 'sub-%s' % subject_label, meta_file[meta_file.rfind('/') + 1:meta_file.rfind('.json')] + '_json_info.csv'), index=False)\n\n\n# define function to remove certain fields from the meta-data files\n# after copying the original ones to sourcedata/\ndef del_meta_data(bids_path, subject_label, fields_del):\n\n    #get all .json files for tasks and subjects, combine both lists\n    list_task_meta_files = glob(os.path.join(bids_path, '*json'))\n    list_sub_meta_files = glob(os.path.join(bids_path, 'sub-'+subject_label, '*', '*.json'))\n    list_meta_files = list_task_meta_files + list_sub_meta_files\n\n    #provide information on workflow\n    print('working on %s'%subject_label)\n    print('found the following meta-data files:')\n    print(*list_meta_files, sep='\\n')\n    print('the following fields will be deleted:')\n    print(*fields_del, sep='\\n')\n\n    #loop over meta data files and delete indicated fields, copying original file to sourcedata\n    for meta_file in list_meta_files:\n        with open(meta_file, 'r') as json_file:\n            meta_data = json.load(json_file)\n        for field in fields_del:\n            meta_data[field] = 'deleted_by_bidsonym'\n        with open(meta_file, 'w') as json_output_file:\n            json.dump(meta_data, json_output_file, indent=4)\n", "sub_path": "bidsonym/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 17, "usage_type": "call"},
{"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "nibabel.load", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 106, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "490721357", "text": "\"\"\"\n\n\"\"\"\n\nfrom itertools import combinations_with_replacement\n\ncity_temps = {\n \"Casa_Grande\": [76, 69, 60, 64, 69],\n \"Chandler\": [77, 68, 61, 65, 67],\n \"Flagstaff\": [46, 35, 33, 40, 
44],\n    \"Lake Havasu City\": [71, 65, 63, 66, 68],\n    \"Sedona\": [62, 47, 45, 51, 56]\n}\n\nhotel_rates = {\n    \"Motel 6\": 89,\n    \"Best Western\": 109,\n    \"Holiday Inn Express\": 115,\n    \"Courtyard by Marriott\": 229,\n    \"Residence Inn\": 199,\n    \"Hampton Inn\": 209\n}\n\n\nHOTEL_BUDGET = int(850)\n\n\ndef averageperweek(dictkey, nameofdict):\n    d = []\n    y = []\n    Temps = {}\n    o = nameofdict\n    for p in dictkey:\n        t = o[p]\n        y.append(max(t))\n        b = (sum(y)/len(y))\n        Temps[b] = p\n    for keys in Temps:\n        d.append(keys)\n    d.sort(reverse= True)\n    foo = sum(y)/len(y)\n    return d, Temps, foo\ndef costdict(combs, referencedict):\n    fres = {}\n    uss = []\n    for i in combs:\n        bdg = []\n        for tt in i:\n            gg = referencedict[tt]\n            bdg.append(gg)\n        uss.append(sum(bdg))\n        fres[i] = uss[-1]\n    return fres\ndef finalbudget(dictkeys, refdict):\n    global HOTEL_BUDGET\n    jk = []\n    for i in dictkeys:\n        jk.append(i)\n    min_cost = HOTEL_BUDGET\n    best_option = None\n    for k in jk:\n        cost = int(refdict[k])\n        if (HOTEL_BUDGET - cost) < min_cost and HOTEL_BUDGET >= cost:\n            min_cost = HOTEL_BUDGET - cost\n            best_option = k\n    return best_option\n# For whatever reason the commented out code below refused to work for me, thus the above code is borrowed from the combinatorics lecture we had right before break.\n# ttt = min(jk, key=lambda o: HOTEL_BUDGET - jk[o] if HOTEL_BUDGET >= jk[o] else HOTEL_BUDGET)\n# return ttt\nx = []\nz = {}\nif __name__ == \"__main__\":\n    cities = list(city_temps.keys())\n    hotels = list(hotel_rates.keys())\n    tmp, dict, maxes = averageperweek(cities,city_temps)\n    for e in tmp:\n        x.append(dict[e])\n    w = list(combinations_with_replacement(hotels, len(cities)))\n    z = costdict(w, hotel_rates)\n    zt = list(z.keys())\n    zz = finalbudget(zt, z)\n    #print(tmp)\n    #print(dict)\n    print(f'Here is your best route: {x} the average of the daily max temp. is {maxes}F')\n    # ..\n    print(f'To max out your hotel budget, stay at these hotels: {zz}, totaling ${z[zz]}')\n\n\n### This is the borrowed code block for the finalbudget function, I will however look into the cause of why my code was not working.\n\n# # if we didn't have the builtin min function .. we could do this\n# #\n# min_cost = BUDGET\n# best_option = []\n# for c in combs:\n#     cost = cost_func(c)\n#     if (BUDGET - cost) < min_cost and BUDGET >= cost:\n#         min_cost = BUDGET - cost\n#         best_option = c\n#\n# print(cost_func(best_option), best_option)", "sub_path": "project2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "itertools.combinations_with_replacement", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "189981528", "text": "import os\nimport cv2\nimport numpy as np\nimport time\nfrom itertools import chain\n\ndef visualize_masks(labels, batch_size, image_shape,\n                    num_classes = 5):\n\n    masks = []\n    for label in range(1, num_classes + 1):\n        masks.append(labels == label)\n\n    labels_vis = np.zeros((batch_size,\n                           image_shape[0],\n                           image_shape[1],\n                           image_shape[2]), np.uint8)\n\n    cmap = [[166, 206, 227],\n            [178, 223, 138],\n            [31, 120, 180],\n            [51, 160, 44],\n            [251, 154, 153],\n            [227, 26, 28],\n            [253, 191, 111],\n            [255, 127, 0],\n            [202, 178, 214],\n            [106, 61, 154],\n            [255, 255, 153],\n            [177, 89, 40],\n            [125, 125, 125]] # added a gray one. 
might not be perfect\n    for i in range(num_classes):\n        labels_vis[masks[i]] = cmap[i]\n\n    return labels_vis\n\ndef mask_rcnn_unmold_cls_mask(mask, bbox, image_shape, idx, full_masks,\n                              box_masks, cls, compute_box_mask=False,\n                              dialate=True, threshold = 0.5):\n    \"\"\"Converts a mask generated by the neural network into a format similar\n    to its original shape.\n    mask: [height, width] of type float. A small, typically 28x28 mask.\n    bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n    Returns a binary or weighted mask with the same size as the original image.\n    \"\"\"\n    y1, x1, y2, x2 = bbox\n    if (x2 - x1) <= 0 or (y2 - y1) <= 0:\n        return\n\n    mask = cv2.resize(mask, (x2 - x1, y2 - y1)).astype(np.float32)\n\n    thresh_mask = np.where(np.logical_and(mask >= threshold,\n                                          cls > full_masks[y1:y2, x1:x2]),\n                           cls, full_masks[y1:y2, x1:x2]).astype(np.uint8)\n    # Put the mask in the right location.\n    full_masks[y1:y2, x1:x2] = thresh_mask\n\n    if box_masks is not None:\n        if dialate:\n            dialate_frac = 0.15\n            dy1 = max(int(y1 - dialate_frac * (y2 - y1)), 0)\n            dx1 = max(int(x1 - dialate_frac * (x2 - x1)), 0)\n\n            dy2 = min(int(y2 + dialate_frac * (y2 - y1)), image_shape[0])\n            dx2 = min(int(x2 + dialate_frac * (x2 - x1)), image_shape[1])\n\n            mask = cv2.resize(mask, (dx2 - dx1, dy2 - dy1)).astype(np.float32)\n            box_masks[dy1:dy2, dx1:dx2] = np.where(mask >= 0, 1, 0).astype(np.bool)\n        else:\n            box_masks[y1:y2, x1:x2] = np.where(mask >= 0, 1, 0).astype(np.bool)\n\ndef mask_rcnn_single_mask(boxes, classes, scores, masks, image_shape,\n                          box_mask=False, box_threshold=0.5,\n                          mask_threshold=0.5):\n    N = len(boxes)\n    # Resize masks to original image size and set boundary threshold.\n    full_masks = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8)\n    box_masks = np.zeros((image_shape[0], image_shape[1]), dtype=np.bool)\n\n    for i in range(N):\n        if scores[i] < box_threshold:\n            continue\n        # Convert neural network mask to full size mask\n        mask_rcnn_unmold_cls_mask(masks[i], boxes[i], image_shape,\n                                  i, full_masks,\n                                  box_masks, classes[i],\n                                  compute_box_mask=box_mask,\n                                  threshold=mask_threshold)\n    return full_masks, box_masks\n\ndef batch_segmentation_masks(batch_size,\n                             image_shape,\n                             batch_boxes,\n                             batch_classes,\n                             batch_masks,\n                             batch_scores,\n                             batch_num_objects,\n                             compute_weight_masks,\n                             class_groups,\n                             mask_threshold=0.5,\n                             box_threshold=0.5,\n                             scale_boxes=True):\n    h = image_shape[0]\n    w = image_shape[1]\n\n    seg_masks = np.zeros((batch_size, h, w), np.uint8)\n    weight_masks = np.zeros((batch_size, h, w), np.bool)\n\n    class_remap = {}\n    for g in range(len(class_groups)):\n        for c in class_groups[g]:\n            class_remap[c] = g + 1\n\n    batch_boxes = batch_boxes.copy()\n\n    if scale_boxes and len(batch_boxes.shape) == 3:\n        batch_boxes[:, :, 0] = batch_boxes[:, :, 0] * h\n        batch_boxes[:, :, 2] = batch_boxes[:, :, 2] * h\n        batch_boxes[:, :, 1] = batch_boxes[:, :, 1] * w\n        batch_boxes[:, :, 3] = batch_boxes[:, :, 3] * w\n\n    batch_boxes = batch_boxes.astype(np.int32)\n\n    for b in range(batch_size):\n        N = batch_num_objects[b]\n        if N == 0:\n            continue\n        boxes = batch_boxes[b, :N, :]\n        masks = batch_masks[b, :N, :, :]\n        scores = batch_scores[b, :N]\n        classes = batch_classes[b, :N]\n\n        for i in range(classes.shape[0]):\n            if classes[i] in class_remap:\n                classes[i] = class_remap[classes[i]]\n            else:\n                classes[i] = 0\n\n        idx = classes > 0\n        boxes = boxes[idx]\n        masks = masks[idx]\n        classes = classes[idx]\n        scores = scores[idx]\n\n        full_masks, box_masks = mask_rcnn_single_mask(boxes, classes,\n                                                      scores, masks,\n                                                      image_shape,\n                                                      
box_mask=compute_weight_masks,\n                                                      box_threshold=box_threshold,\n                                                      mask_threshold=mask_threshold)\n        seg_masks[b] = full_masks\n        weight_masks[b] = box_masks\n\n    return seg_masks, weight_masks\n\n\nclass MaskRCNNStream:\n    def __init__(self, video_stream_path, detections_path,\n                 start_frame=0, num_frames=None, stride=1,\n                 loop=False, resize=(1280, 720)):\n        assert(os.path.isfile(video_stream_path))\n        assert(os.path.isfile(detections_path))\n        self.cap = cv2.VideoCapture(video_stream_path)\n        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n        self.rate = int(self.cap.get(cv2.CAP_PROP_FPS))\n        self.length = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n        self.num_frames = num_frames\n        self.resize = resize\n\n        self.detections_path = detections_path\n        self.detections = None\n        self.stride = stride\n        self.loop = loop\n\n        assert(start_frame >= 0)\n        self.start_frame = start_frame\n        self.end_frame = self.length\n\n        # Seek to the start frame\n        if self.start_frame > 0:\n            self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)\n\n    def __next__(self):\n        if self.detections is None:\n            self.detections = np.load(self.detections_path, allow_pickle=True)[()]\n            self.labeled_frames = list(self.detections.keys())\n            self.num_labeled_frames = len(self.labeled_frames)\n            if self.num_frames is not None:\n                assert(self.start_frame + self.num_frames <= self.length)\n                self.end_frame = (self.start_frame + self.num_frames) - 1\n\n        frame = None\n        boxes = None\n        classes = None\n        scores = None\n        masks = None\n        labels_not_found = True\n        while labels_not_found:\n            frame_id = self.cap.get(cv2.CAP_PROP_POS_FRAMES)\n            ret, frame = self.cap.read()\n\n            if (not ret) or (frame_id >= self.end_frame - 1):\n                if self.loop:\n                    self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)\n                    frame_id = self.start_frame\n                    ret, frame = self.cap.read()\n                else:\n                    self.detections = None\n                    raise StopIteration\n\n            # only convert/resize after a successful read; frame is None when read() fails\n            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n            frame = cv2.resize(frame, self.resize)\n            frame = frame[...,::-1].copy()\n\n            if frame_id in self.detections and frame_id % self.stride==0:\n                boxes, classes, scores, masks = self.detections[frame_id]\n                labels_not_found = False\n\n        return frame, boxes, classes, scores, masks, scores.shape[0], frame_id\n\n    def __iter__(self):\n        return self\n\n    def reset(self):\n        self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)\n\n    next = __next__\n\n\nclass MaskRCNNMultiStream:\n    def __init__(self, video_paths, detections_paths,\n                 start_frame=0, stride=1):\n        self.streams = []\n        self.stream_idx = 0\n        self.num_streams = len(video_paths)\n        print(video_paths)\n        for d in range(len(video_paths)):\n            input_stream = MaskRCNNStream(video_paths[d], detections_paths[d],\n                                          start_frame=start_frame, stride=stride)\n            self.streams.append(input_stream)\n\n    def __next__(self):\n        self.stream_idx = (self.stream_idx + 1) % self.num_streams\n\n        return self.streams[self.stream_idx].__next__()\n\n    def __iter__(self):\n        return self\n\n    next = __next__\n\nclass MaskRCNNSequenceStream:\n    def __init__(self, video_paths, detections_paths,\n                 start_frame=0, stride=1):\n        self.streams = []\n        self.stream_idx = 0\n        self.num_streams = len(video_paths)\n        self.rate = 0\n        for d in range(len(video_paths)):\n            input_stream = MaskRCNNStream(video_paths[d], detections_paths[d],\n                                          start_frame=start_frame, stride=stride,\n                                          loop=False)\n            self.streams.append(input_stream)\n            #print(self.rate, input_stream.rate)\n            if self.rate == 0:\n                self.rate = input_stream.rate\n            
#else:\n # assert(self.rate == input_stream.rate)\n self.seq_stream = chain(*(self.streams))\n\n def __next__(self):\n return next(self.seq_stream)\n\n def __iter__(self):\n return self\n\n next = __next__\n\n\nif __name__ == \"__main__\":\n mask_rcnn_stream = MaskRCNNStream('/home/cfan/lvsdataset/driving1/driving1000.mp4',\n '/home/cfan/lvsdataset/driving1/detectron_large_mask_rcnn_1_driving1000.npy', num_frames=100)\n count = 0\n start = time.time()\n for s in mask_rcnn_stream:\n frame, boxes, classes, scores, masks, num_objects, frame_id = s\n count = count + 1\n end = time.time()\n print(count, (end - start)/count)", "sub_path": "dataloaders/maskrcnn_stream.py", "file_name": "maskrcnn_stream.py", "file_ext": "py", "file_size_in_byte": 10641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 164, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 165, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 166, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 186, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 200, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 203, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", 
"line_number": 208, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 224, "usage_type": "attribute"}, {"api_name": "itertools.chain", "line_number": 269, "usage_type": "call"}, {"api_name": "time.time", "line_number": 284, "usage_type": "call"}, {"api_name": "time.time", "line_number": 288, "usage_type": "call"}]} +{"seq_id": "612375499", "text": "\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\n\nfrom com.kailin.api_image import api_image\nfrom com.kailin.api_file import api_file\nfrom matplotlib import pyplot\n\nimport numpy\n\nnumpy.random.seed(10)\n\n\ndef timestarp2string(dataset):\n result = []\n for i in range(len(dataset['日期'])):\n result.append(str(dataset['日期'][i])[:10].replace('-', ''))\n dataset['日期'] = result\n return dataset\n\n\ndef createDataset(dataset, look_back=1):\n dataX = []\n dataY = []\n for i in range(len(dataset) - look_back - 1):\n dataX.append(dataset[i:(i + look_back), 1])\n dataY.append(dataset[i + look_back, 1])\n print(dataset[i:(i + look_back), 1])\n return numpy.array(dataX), numpy.array(dataY)\n\n\ndef futureDataset(data):\n size = len(data)\n dataX = []\n dataX.append(data[size - 2:size - 1])\n return numpy.array(dataX)\n\n\nstockcode = '0050'\npathXlsx = api_file.dataPath + stockcode + '.xlsx'\npathh5 = api_file.dataPath + stockcode + '.h5'\n\nstock2330 = api_file.readExcel(pathXlsx)\nstock2330_X = stock2330['日期'].values\nstock2330_Y = stock2330['收盤'].values\nstock2330_XY = timestarp2string(stock2330)[['日期', '收盤']].values.astype('float64')\n\nlook_back = 5\nlook_predict = len(stock2330_XY) - 20\ntrain, test = stock2330_XY[:look_predict], stock2330_XY[look_predict:]\ntrainX, trainY = createDataset(train, look_back)\ntestX, testY = createDataset(test, look_back)\nprint(testX)\n\nmodel = Sequential()\n# model.add(LSTM(batch_input_shape=(look_back, len(trainX), look_back),\n# output_dim=look_back * 10,\n# return_sequences=True,\n# stateful=True,\n# dropout=0.3))\nmodel.add(Dense(input_dim=look_back, units=look_back * 10, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(units=8))\nmodel.add(Dense(units=1))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\n\ntry:\n api_file.loadMode(model, pathh5)\nexcept:\n history = model.fit(trainX, trainY, nb_epoch=1000, batch_size=10, validation_split=0.1, verbose=2)\n api_image.showTrainHistory(history, 'loss', 'val_loss')\n trainScore = model.evaluate(trainX, trainY, verbose=0)\n print('Train Score ', trainScore)\n testScore = model.evaluate(testX, testY, verbose=0)\n print('Test Score ', testScore)\n api_file.saveMode(model, pathh5)\n\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n\npyplot.plot(stock2330_X, stock2330_Y)\npyplot.plot(stock2330_X[look_back:len(trainPredict) + look_back], trainPredict)\npyplot.plot(stock2330_X[len(trainPredict) + look_back * 2 + 1:len(stock2330_X) - 1], testPredict)\n# pyplot.show()\n\ntemp = []\nday = 10\nfor i in range(day):\n temp.append(stock2330_Y[len(stock2330_Y) - day + i])\nfor i in range(day):\n data = []\n data.append(temp[-look_back:])\n data = numpy.array(data)\n predict = model.predict(data)\n print('future day : ', day, '\\n', predict)\n temp.append(predict[0][0])\n", "sub_path": "tensorflow/com/kailin/e15.py", "file_name": "e15.py", "file_ext": "py", "file_size_in_byte": 2972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "numpy.random.seed", "line_number": 11, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "com.kailin.api_file.api_file.dataPath", "line_number": 40, "usage_type": "attribute"}, {"api_name": "com.kailin.api_file.api_file", "line_number": 40, "usage_type": "name"}, {"api_name": "com.kailin.api_file.api_file.dataPath", "line_number": 41, "usage_type": "attribute"}, {"api_name": "com.kailin.api_file.api_file", "line_number": 41, "usage_type": "name"}, {"api_name": "com.kailin.api_file.api_file.readExcel", "line_number": 43, "usage_type": "call"}, {"api_name": "com.kailin.api_file.api_file", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "com.kailin.api_file.api_file.loadMode", "line_number": 68, "usage_type": "call"}, {"api_name": "com.kailin.api_file.api_file", "line_number": 68, "usage_type": "name"}, {"api_name": "com.kailin.api_image.api_image.showTrainHistory", "line_number": 71, "usage_type": "call"}, {"api_name": "com.kailin.api_image.api_image", "line_number": 71, "usage_type": "name"}, {"api_name": "com.kailin.api_file.api_file.saveMode", "line_number": 76, "usage_type": "call"}, {"api_name": "com.kailin.api_file.api_file", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "504228683", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Ansible Project\n# Copyright: (c) 2018, Abhijeet Kasurde \n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['deprecated'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_category_facts\ndeprecated:\n removed_in: '2.13'\n why: Deprecated in favour of C(_info) module.\n alternative: Use M(vmware_category_info) instead.\nshort_description: Gather facts about VMware tag categories\ndescription:\n- This module can be used to gather facts about VMware tag categories.\n- Tag feature is introduced in vSphere 6 version, so this module is not supported in earlier versions of vSphere.\n- All variables and VMware object names are case sensitive.\nversion_added: '2.7'\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\n- vSphere Automation SDK\nextends_documentation_fragment: vmware_rest_client.documentation\n'''\n\nEXAMPLES = r'''\n- name: Gather facts about tag 
categories\n vmware_category_facts:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n delegate_to: localhost\n register: all_tag_category_facts\n\n- name: Gather category id from given tag category\n vmware_category_facts:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n delegate_to: localhost\n register: tag_category_results\n\n- set_fact:\n category_id: \"{{ item.category_id }}\"\n loop: \"{{ tag_category_results.tag_category_facts|json_query(query) }}\"\n vars:\n query: \"[?category_name==`Category0001`]\"\n- debug: var=category_id\n\n'''\n\nRETURN = r'''\ntag_category_facts:\n description: metadata of tag categories\n returned: always\n type: list\n sample: [\n {\n \"category_associable_types\": [],\n \"category_cardinality\": \"MULTIPLE\",\n \"category_description\": \"awesome description\",\n \"category_id\": \"urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL\",\n \"category_name\": \"Category0001\",\n \"category_used_by\": []\n },\n {\n \"category_associable_types\": [\n \"VirtualMachine\"\n ],\n \"category_cardinality\": \"SINGLE\",\n \"category_description\": \"another awesome description\",\n \"category_id\": \"urn:vmomi:InventoryServiceCategory:ae5b7c6c-e622-4671-9b96-76e93adb70f2:GLOBAL\",\n \"category_name\": \"template_tag\",\n \"category_used_by\": []\n }\n ]\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.vmware_rest_client import VmwareRestClient\n\n\nclass VmwareCategoryFactsManager(VmwareRestClient):\n def __init__(self, module):\n super(VmwareCategoryFactsManager, self).__init__(module)\n self.category_service = self.api_client.tagging.Category\n\n def get_all_tag_categories(self):\n \"\"\"Retrieve all tag category information.\"\"\"\n global_tag_categories = []\n for category in self.category_service.list():\n category_obj = self.category_service.get(category)\n global_tag_categories.append(\n dict(\n category_description=category_obj.description,\n category_used_by=category_obj.used_by,\n category_cardinality=str(category_obj.cardinality),\n category_associable_types=category_obj.associable_types,\n category_id=category_obj.id,\n category_name=category_obj.name,\n )\n )\n\n self.module.exit_json(changed=False, tag_category_facts=global_tag_categories)\n\n\ndef main():\n argument_spec = VmwareRestClient.vmware_client_argument_spec()\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n vmware_category_facts = VmwareCategoryFactsManager(module)\n vmware_category_facts.get_all_tag_categories()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "env/lib/python3.9/site-packages/ansible/modules/cloud/vmware/_vmware_category_facts.py", "file_name": "_vmware_category_facts.py", "file_ext": "py", "file_size_in_byte": 4182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "ansible.module_utils.vmware_rest_client.VmwareRestClient", "line_number": 99, "usage_type": "name"}, {"api_name": "ansible.module_utils.vmware_rest_client.VmwareRestClient.vmware_client_argument_spec", "line_number": 124, "usage_type": "call"}, {"api_name": "ansible.module_utils.vmware_rest_client.VmwareRestClient", "line_number": 124, "usage_type": "name"}, {"api_name": "ansible.module_utils.basic.AnsibleModule", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "538473668", "text": 
"import MOODS.scan\nimport MOODS.tools\nimport MOODS.parsers\nimport os\nimport sys\nimport argparse\nimport pandas as pd\nimport seaborn as sns\nimport pybedtools\nimport math\nimport numpy as np\nimport scipy\nimport glob\nimport time\nfrom tqdm import tqdm\n\nfrom itertools import groupby, chain\n\nMODES = [\"MODE1\", \"MODE2\", \"MODE3\", \"MODE4\"]\nMODES = pd.DataFrame(MODES)\n\ndef a_bin_parse(x):\n BIN_FILE = []\n peaks = x[\"ID\"]\n bins = x['bins']\n start = x['Start']\n chrom = str(x['Chr'])\n bins_zero = bins - 1\n counter = 0\n\n print (\"Binning BED: Writing Bins for: \" + peaks)\n for i in tqdm(range(int(bins_zero))):\n bin_track = counter * 100\n bin_start = start + bin_track\n bin_end = bin_start + 99\n\n tmp3 = chrom, bin_start, bin_end, peaks, bins\n\n BIN_FILE.append(tmp3)\n\n counter = counter + 1\n\n return BIN_FILE\n\ndef bed_intersect(x, y, z):\n print (\"BED Intersect: Importing BED1\")\n a = pybedtools.BedTool(x)\n\n print (\"BED Intersect: Importing BED2\")\n b = pybedtools.BedTool(y)\n\n print (\"BED Intersect: Intersecting BEDS\")\n a_and_b = a.intersect(b, wa=True, wb=True)\n\n print (\"BED Intersect: Writing BEDS\")\n c = a_and_b.moveto(\"TF_domain.bed\")\n\n print (\"BED Intersect: Creating BED files\")\n df = c.to_dataframe()\n\n if z == True:\n return df\n else:\n return c\n\ndef bin_group_collect(x, df, path, files_list):\n f_name = \"DHS_CHIP_\" + x + \".txt\"\n files_list.append(f_name)\n frame = pd.DataFrame()\n list_df = []\n\n #create a dataframe of the TFs that have the same MODE\n print (\"Bin Rerence Bed: Framing \" + x + \" BED files for processing\")\n mode_df = df[df.MODE == x]\n\n # create a column with the name of the bed file for each MODE\n print (\"Bin Rerence Bed: Extracting extensions\")\n mode_df[\"bed_name\"] = mode_df[\"TF_Name\"] + \"_\" + mode_df[\"MODE\"] + \".bed\"\n\n # Create a list of bed files the concactenate\n print (\"Bin Rerence Bed: Extracting unique BED files\")\n bed_list = mode_df[\"bed_name\"].unique().tolist()\n bed_list.append(path)\n\n for fname in bed_list:\n print (\"Bin Rerence Bed: Creating DF of \" + fname)\n bin_df = pd.read_csv(fname, sep=\"\\t\", usecols=[0, 1, 2], header=None)\n list_df.append(bin_df)\n\n print (\"Bin Rerence Bed: Concatenating BED Dataframes\")\n frame = pd.concat(list_df)\n\n print (\"Bin Rerence Bed: Sorting: \" + f_name)\n frame.sort_values(by=[0, 1, 2], inplace=True)\n\n print (\"Bin Rerence Bed: Merging overlaps: \" + f_name)\n a = pybedtools.BedTool.from_dataframe(frame)\n c = a.merge()\n df = c.moveto(f_name)\n\n print (\"Bin Rerence Bed: Creating DF of: \" + f_name)\n df = pd.read_csv(f_name, sep=\"\\t\", header=None)\n\n df.columns = [\"Chr\", \"Start\", \"Stop\"]\n\n print (\"Bin Rerence Bed: Calculating BIN length\")\n df[\"len\"] = df[\"Stop\"] - df[\"Start\"] + 1\n\n print (\"Bin Rerence Bed: Calculating BIN number\")\n df[\"bins\"] = df[\"len\"]/100\n\n print (\"Bin Rerence Bed: Applying ceilings to BIN numbers\")\n df[\"bins\"] = df[\"bins\"].apply(math.ceil)\n df[\"ID\"] = df[\"Chr\"] + \"_\" + df[\"Start\"].apply(str) + \"_\" + df[\"Stop\"].apply(str)\n\n name = f_name + \"_BINS.bed\"\n\n print (\"Binning BED: Working on: \" + f_name)\n\n BIN_FILE = df.apply(a_bin_parse, axis=1)\n print (\"Binning BED: Writing : \" + f_name)\n\n BIN_FILE.to_csv(name, sep=\"\\t\", header=False, index=False)\n\ndef dict_TF_df(x):\n df = pd.DataFrame()\n\n df[\"Motif\"] = x[\"Motif_ID\"]\n df[\"TF\"] = x[\"TF_Name\"]\n\n df.set_index(\"Motif\")\n\n dicted = dict(zip(df.Motif, df.TF))\n\n 
return dicted\n\ndef intersect_bin(x):\n    for m in MODES:\n        f_name = \"DHS_CHIP_\" + m + \".txt_BINS.bed\"\n\n        y = glob.glob(\"*\" + m + \".bed\")\n\n        print (\"Intersect BED: List to intersect: \" + str(y))\n\n        a = pybedtools.BedTool(f_name)\n        b = pybedtools.BedTool(y)\n\n        print (\"Intersect BED: Working on: \" + f_name)\n\n        a_and_b = a.intersect(b, wa=True, wb=True)\n\n        c = a_and_b.moveto(\"CHIP_DHS_RE_SUB_BINS_\" + m + \".bed\")\n\n        d = pybedtools.BedTool(x)\n\n        print (\"Intersect BED: Working on: CHIP\")\n\n        a_and_d = a.intersect(d, wa=True, wb=True)\n\n        e = a_and_d.moveto(\"MOODS_DHS_RE_SUB_BINS_\" + m + \".bed\")\n\ndef make_set_dir(x):\n    cwd = os.getcwd()\n\n    dir = cwd + \"/\" + x\n\n    if not os.path.exists(dir):\n        os.makedirs(dir)\n\n    os.chdir(dir)\n\ndef merge_replicate_BEDS(x, y):\n    replicate_groups = x[\"TF_Name\"].unique()\n\n    rep_group_counts = x.groupby([\"TF_Name\", \"MODE\"]).count()\n    rep_group_counts.drop([0], axis=1, inplace=True)\n    rep_group_counts.reset_index(inplace=True)\n\n    rep_group_replicates = rep_group_counts[rep_group_counts.Sample_Name > 1]\n    rep_group_singles = rep_group_counts[rep_group_counts.Sample_Name == 1]\n    rep_group_all = rep_group_counts[rep_group_counts.Sample_Name >= 1]\n\n    unique_TF_Rep = rep_group_replicates[\"TF_Name\"].unique()\n    unique_TF_single = rep_group_singles[\"TF_Name\"].unique()\n    unique_TF_all = rep_group_all[\"TF_Name\"].unique()\n\n    for i in unique_TF_Rep:\n        tf_df = x[x.TF_Name == i]\n\n        for j in MODES:\n            mode_df = tf_df[tf_df.MODE == j]\n\n            mode_paths = mode_df[\"file_path\"]\n\n            tf_filename = i + \"_\" + j + \".bed\"\n\n            print (\"Merging \" + tf_filename)\n\n            with open(tf_filename, 'w') as outfile:\n                for fname in mode_paths:\n                    with open(fname) as infile:\n                        outfile.write(infile.read())\n\n            df_sort = pd.read_csv(tf_filename, sep=\"\\t\", header=None, usecols=[0,1,2,3])\n            df_sort = df_sort[[0,1,2,3]]\n            df_sort.sort_values(by=[0, 1, 2], inplace=True)\n\n            df_sort.to_csv(tf_filename, sep=\"\\t\", index=False, header=False)\n\n            a = pybedtools.BedTool(tf_filename)\n            c = a.merge()\n            d = c.moveto(tf_filename)\n\n    for k in unique_TF_single:\n        tf_df = x[x.TF_Name == k]\n\n        for l in MODES:\n            mode_df = tf_df[tf_df.MODE == l]\n\n            mode_paths = mode_df[\"file_path\"]\n\n            tf_filename = k + \"_\" + l + \".bed\"\n            print (\"Parsing TF single file \" + tf_filename)\n            with open(tf_filename, 'w') as outfile:\n                for fname in mode_paths:\n                    with open(fname) as infile:\n                        outfile.write(infile.read())\n\ndef parse_CHIP(x, y):\n    df = pd.DataFrame.from_dict(x)\n\n    df[\"Basename\"] = df[0].apply(os.path.basename)\n    df[\"File_ext\"] = df.Basename.str.split(\"_\").str[-1]\n    df[\"MODE\"] = df.File_ext.str.split(\".\").str[0]\n    df[\"Sample_Name\"] = df.Basename.str.split(\"_\").str[0]\n\n    df.drop(\"File_ext\", axis=1, inplace=True)\n    df.drop(\"Basename\", axis=1, inplace=True)\n\n    dicted = dict(zip(y.Sample_Name, y.TF))\n\n    df[\"TF_Name\"] = df[\"Sample_Name\"].map(dicted)\n    df.dropna(inplace=True)\n    df[\"file_path\"] = df[0]\n\n    return df\n\ndef parse_GEOS(x):\n    df = pd.read_csv(x, sep=\"\\t\", header=None, usecols=[1, 13], names=[\"Sample_Name\", \"info\"])\n\n    df[\"TF\"] = df[\"info\"].str.split(\":\").str[3]\n    df[\"Type\"] = df[\"info\"].str.split(\":\").str[1]\n\n    df.drop([\"info\"], axis=1, inplace=True)\n\n    df = df[df.Type != \"viral_protein\"]\n    histone_markers_list = [\"H3K27ac\", \"H3K27me3\", \"H3K36me3\", \"H3K4me1\", \"H3K4me2\", \"H3K4me3\", \"H3K9ac\", \"H3K9me3\", \"H3K79me2\"]\n\n    for i in histone_markers_list:\n        df = df[df.TF != i]\n\n    return df\n\ndef 
parse_moods(x, y):\n    print (\"Parsing MOODS: Reading in CSV\")\n    df = pd.read_csv(x, header=None, sep=\"|\")\n\n    print (\"Parsing MOODS: Replacing some extensions\")\n    df[1] = df[1].str.replace('_JASPAR.txt.pfm', '')\n\n    print (\"Parsing MOODS: Dropping a column\")\n    df.drop(6, axis=1, inplace=True)\n\n    print (\"Parsing MOODS: Splitting and creating new frame\")\n    df_tmp1 = df[0].str.split(\":\", expand=True)\n\n    print (\"Parsing MOODS: Splitting and creating new frame2\")\n    df_tmp2 = df_tmp1[1].str.split(\"-\", expand=True)\n\n    print (\"Parsing MOODS: Dropping another column\")\n    df.drop(columns=0, inplace=True)\n\n    print (\"Parsing MOODS: Setting Values\")\n    df[\"chr\"] = df_tmp1[0]\n    df[\"start\"] = df_tmp2[0]\n    df[\"stop\"] = df_tmp2[1]\n\n    print (\"Parsing MOODS: Deleting unused frames\")\n    del df_tmp1\n    del df_tmp2\n\n    print (\"Parsing MOODS: Setting column names\")\n    df.columns = [\"MOTIF_ID\", \"TF_POS\", \"STRAND\", \"MATCH_SCORE\", \"MOTIF_SEQ\", \"chr\", \"start\", \"stop\"]\n\n    print (\"Parsing MOODS: Calculating TF motif start positions\")\n    df[\"TF_start\"] = df[\"start\"].apply(int) + 1 + df[\"TF_POS\"]\n\n    print (\"Parsing MOODS: Calculating TF motif end positions\")\n    df[\"TF_end\"] = df[\"TF_start\"] + df[\"MOTIF_SEQ\"].str.len() - 1\n\n    print (\"Parsing MOODS: Extracting Peak ID\")\n    df[\"PEAK_ID\"] = df[\"chr\"] + \"_\" + df[\"start\"].map(str) + \"_\" + df[\"stop\"].map(str)\n\n    print (\"Parsing MOODS: Extracting Motif ID\")\n    df[\"MOTIF_POS\"] = df[\"chr\"] + \"_\" + df[\"TF_start\"].map(str) + \"_\" + df[\"TF_end\"].map(str)\n\n    print (\"Parsing MOODS: Calculating motif length\")\n    df[\"MOTIF_LEN\"] = df[\"TF_end\"] - df[\"TF_start\"] + 1\n\n    print (\"Parsing MOODS: Mapping TF names\")\n    df[\"TF_Name\"] = df[\"MOTIF_ID\"].map(y)\n\n    print (\"Parsing MOODS: Dropping unused columns\")\n    df.drop(columns=[\"TF_POS\", \"MOTIF_ID\", \"MATCH_SCORE\", \"MOTIF_SEQ\", \"STRAND\", \"start\", \"stop\"], inplace=True)\n\n    return df\n\ndef percentile_parse(x, y):\n    for i in x[\"file_path\"]:\n        df = pd.read_csv(str(i), sep=\"\\t\", header=None)\n        col_num = len(df.columns)\n        base = os.path.basename(i)\n\n        if col_num == 11:\n            per_len = len(df[7]) // y  # integer row count for .head()\n\n            top_sorted = df[7].sort_values(ascending=False).head(per_len)\n\n            df.sort_values(by=[7], ascending=False, inplace=True)\n\n            df = df.head(per_len)\n\n            df.to_csv(str(base))\n\n        elif col_num == 13:\n            per_len = len(df[12]) // 4  # integer row count for .head()\n\n            top_sorted = df[12].sort_values(ascending=False).head(per_len)\n\n            df.sort_values(by=[12], ascending=False, inplace=True)\n\n            df = df.head(per_len)\n\n            df.to_csv(str(base))\n", "sub_path": "bin/MOTIF_ANALYSIS/snippets.py", "file_name": "snippets.py", "file_ext": "py", "file_size_in_byte": 10059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 47, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 91, "usage_type": "call"}, {"api_name": "pybedtools.BedTool.from_dataframe", "line_number": 97, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 97, "usage_type": "attribute"}, {"api_name": 
"pandas.read_csv", "line_number": 102, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 141, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 145, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 146, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 154, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 168, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 170, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 204, "usage_type": "call"}, {"api_name": "pybedtools.BedTool", "line_number": 210, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 249, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 266, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}]} +{"seq_id": "565169714", "text": "from ProjectInterface.Examples.ReadAndAssignment import *\r\nimport requests\r\nimport json\r\nfrom util.GetTestData import *\r\n\r\nclass send_and_receive():\r\n\r\n test_data_path = \"D:/TK/AutomatedTestingFramework/ProjectInterface/Examples/test_data/Examples.xlsx\"\r\n test_data_sheet = \"test_data\"\r\n GetTestData = GetTestData(test_data_path, test_data_sheet)\r\n\r\n def __init__(self, transaction, line):\r\n # self.GetTestData = GetTestData(test_data_path, test_data_sheet)\r\n # self.GetTestData = MessageDataProcessing(transaction, line)\r\n self.sheet = self.GetTestData.pe.get_sheet_by_name(send_and_receive.test_data_sheet)\r\n\r\n self.MessageDataProcessing = MessageDataProcessing(transaction, line)\r\n # self.hebao_url = read_excel_conf_ini(\"微信出单\" ,\"hebao_url\")\r\n # self.chudan_url = read_excel_conf_ini(\"微信出单\" ,\"chudan_url\")\r\n self.hebao_url = \"http://weit.tk.cn/wesure/rest/healthNoticeTest/testFreePolicyV2\"\r\n # self.chudan_url = \"\"\r\n self.case_name = self.MessageDataProcessing.case_name\r\n\r\n\r\n\r\n def hebao_message(self):\r\n self.send_message = self.MessageDataProcessing.hebao_message()\r\n # self.send_message[\"name\"] = self.send_message[\"insureName\"]\r\n self.send_message[\"credentialNo\"] = self.send_message[\"insureCredentialNo\"]\r\n return self.send_message\r\n\r\n def chudan_message(self):\r\n pass\r\n\r\n def send(self, data):\r\n # headers = {\"Content-Type\":\"text/xml\"}\r\n # postJsonResponse = requests.post(url=self.hebao_url, headers=headers, data=json.dumps(data))\r\n postJsonResponse = requests.post(url=self.hebao_url, data=json.dumps(data))\r\n # print(postJsonResponse.text.encode(\"utf8\"))\r\n # postJsonResponse = postJsonResponse.status_code\r\n postJsonResponse = postJsonResponse.text\r\n\r\n self.postJsonResponse = postJsonResponse\r\n\r\n send_message = open(os.getcwd() + \"/send_message/\" 
+ self.case_name + \"_send.txt\", \"w\")\r\n        send_message.write(str(data))\r\n        send_message.close()\r\n        logger_info(\"|发送报文-\" + str(data))\r\n\r\n    def receive(self, line):\r\n\r\n        # 'line' is the Excel row that the result is written back to\r\n        received = self.postJsonResponse\r\n        self.received = json.loads(received)\r\n\r\n        receive_message = open(os.getcwd() + \"/receive_message/\" + self.case_name + \"_receive.txt\", \"w\")\r\n        receive_message.write(str(self.received))\r\n        receive_message.close()\r\n\r\n        resultMsg = self.received[\"resultMsg\"]\r\n        logger_info(\"|返回报文-\" + str(self.received))\r\n\r\n\r\n        if resultMsg == \"成功\":\r\n            policyNo = self.received[\"responseData\"][\"policyNo\"]\r\n\r\n            self.GetTestData.pe.write_cell(\r\n                self.sheet, resultMsg, rowNo=line, colsNo=22, style=\"green\"\r\n            )\r\n            self.GetTestData.pe.write_cell(\r\n                self.sheet, policyNo, rowNo=line, colsNo=23, style=\"green\"\r\n            )\r\n            self.GetTestData.pe.write_cell_current_time(\r\n                self.sheet, rowNo=line, colsNo=24, style=\"green\"\r\n            )\r\n        else:\r\n            self.MessageDataProcessing.GetTestData.pe.write_cell(\r\n                self.sheet, resultMsg, rowNo=line, colsNo=22, style=\"red\"\r\n            )\r\n            self.GetTestData.pe.write_cell_current_time(\r\n                self.sheet, rowNo=line, colsNo=24, style=\"red\"\r\n            )\r\n\r\n        # if resultMsg ==\"成功\":\r\n        #     # policy number: write to excel\r\n        #     self.MessageDataProcessing.GetTestData.pe.write_cell(self.MessageDataProcessing.GetTestData.test_data,\r\n        #                                                      self.receive, rowNo=line, colsNo=22, style=\"green\")\r\n        # else:\r\n        #     self.MessageDataProcessing.GetTestData.pe.write_cell(self.MessageDataProcessing.GetTestData.test_data,\r\n        #                                                      self.receive, rowNo=line, colsNo=22, style=\"green\")\r\n        #\r\n        #     self.MessageDataProcessing.GetTestData.pe.write_cell(self.MessageDataProcessing.GetTestData.test_data,\r\n        #                                                      self.receive, rowNo=line, colsNo=23, style=\"red\")\r\n\r\n\r\nif __name__ == '__main__':\r\n    send_and_receive = send_and_receive(\"hebao\", 7)\r\n    hebao_message = send_and_receive.hebao_message()\r\n    hebao_message_send = send_and_receive.send(hebao_message)\r\n    hebao_message_receive = send_and_receive.receive(7)  # write back to the same Excel row used in the constructor\r\n\r\n    # send_message_deal = send_message_deal(\"hebao\", 7)\r\n    # hebao_message = send_message_deal.hebao_message()\r\n", "sub_path": "pythoncode/AutomatedTestingFramework/ProjectInterface/Examples/SendAndReceive.py", "file_name": "SendAndReceive.py", "file_ext": "py", "file_size_in_byte": 4561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "requests.post", "line_number": 38, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "594419654", "text": "import json\nfrom datetime import datetime\n\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\nfrom cryptography.hazmat.backends import default_backend\n\nfrom verktyg import Response\nfrom verktyg import exceptions as vte\n\nfrom verktyg import Dispatcher\nfrom verktyg.views import expose_json, expose\n\nfrom pope_server_common.json import parse_bigint, render_datetime\nfrom pope_server_common.auth import require_auth\nfrom pope_signing_service.exceptions import NotFoundError\nfrom pope_signing_service import schemata\nfrom pope_signing_service.views import _dummy_url_for\nfrom pope_signing_service.certificates import (\n    list_certificates, get_certificate, create_certificate,\n)\n\nimport logging\nlog = logging.getLogger(\"pope_signing_service\")\n\n\nviews = Dispatcher()\n\n\ndef 
_render_certificate_record(certificate, *, url_for=_dummy_url_for):\n    response = {\n        'id': certificate.id,\n        'url': url_for('certificate', certificate_id=certificate.id),\n\n        'fingerprint': certificate.fingerprint,\n\n        'valid_from': render_datetime(certificate.valid_from),\n        'valid_to': render_datetime(certificate.valid_to),\n    }\n\n    if certificate.revoked:\n        response['revoked'] = certificate.revoked\n\n    return response\n\n\n@expose_json(views, 'certificates', methods=['GET'])\n@require_auth\ndef list_certificates_view(app, req):\n    now = datetime.utcnow()\n\n    return [\n        _render_certificate_record(certificate)\n        for certificate in list_certificates(req.db_session, now=now)\n    ]\n\n\n@expose_json(views, 'certificates', methods=['POST'])\n@require_auth\ndef create_certificates_view(app, req):\n    now = datetime.utcnow()\n\n    # certificate created by admin\n    data = json.loads(req.get_data(as_text=True))\n    schemata.create_certificate_request.validate(data)\n\n    exponent = parse_bigint(data['jwk']['e'])\n    modulus = parse_bigint(data['jwk']['n'])\n    public_key = RSAPublicNumbers(\n        exponent, modulus\n    ).public_key(backend=default_backend())\n\n    certificate = create_certificate(\n        req.db_session, app.signing_backend,\n        subject='client.example.com', public_key=public_key,\n        ttl=14, now=now\n    )\n\n    response = Response(\n        json.dumps(_render_certificate_record(\n            certificate, url_for=app.url_for\n        )),\n        content_type='application/json', status=201,\n        headers={\n            'Location': app.url_for(\n                'certificate', certificate_id=certificate.id\n            ),\n        }\n    )\n    req.db_session.commit()\n\n    return response\n\n\n@expose_json(views, 'certificate', methods=['DELETE'])\n@require_auth\ndef revoke_certificates_view(app, req, *, certificate_id):\n    raise NotImplementedError()\n\n\n@expose_json(views, 'certificate-by-fingerprint', methods=['GET'])\ndef get_certificate_by_fingerprint_view(app, req, *, fingerprint):\n    now = datetime.utcnow()\n\n    try:\n        certificate = get_certificate(\n            req.db_session, now=now, fingerprint=fingerprint\n        )\n    except NotFoundError:\n        raise vte.NotFound()\n\n    response = Response(\n        json.dumps(_render_certificate_record(\n            certificate, url_for=app.url_for\n        )),\n        content_type='application/json', status=301,\n        headers={\n            'Location': app.url_for(\n                'certificate', certificate_id=certificate.id\n            ),\n        }\n    )\n    return response\n\n\n@expose(views, 'certificate', methods=['GET'],\n        content_type='application/x-509-user-cert')\ndef get_certificate_pem_view(app, req, *, certificate_id):\n    now = datetime.utcnow()\n\n    try:\n        certificate = get_certificate(\n            req.db_session, now=now, certificate_id=certificate_id\n        )\n    except NotFoundError:\n        raise vte.NotFound()\n\n    return Response(\n        certificate.raw_pem,\n        content_type='application/x-509-user-cert',\n        headers={\n            'X-Revoked': certificate.revoked or 'not-revoked',\n        }\n    )\n\n\n@expose_json(views, 'certificate', methods=['GET'], qs=0.9)\ndef get_certificate_view(app, req, *, certificate_id):\n    now = datetime.utcnow()\n\n    try:\n        certificate = get_certificate(\n            req.db_session, now=now, certificate_id=certificate_id\n        )\n    except NotFoundError:\n        raise vte.NotFound()\n\n    return _render_certificate_record(certificate, url_for=app.url_for)\n\n\ndef bind(builder):\n    builder.add_bindings(views)\n", "sub_path": "pope_signing_service/views/certificates.py", "file_name": "certificates.py", "file_ext": "py", "file_size_in_byte": 4437, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "logging.getLogger", 
"line_number": 23, "usage_type": "call"}, {"api_name": "verktyg.Dispatcher", "line_number": 26, "usage_type": "call"}, {"api_name": "pope_signing_service.views._dummy_url_for", "line_number": 29, "usage_type": "name"}, {"api_name": "pope_server_common.json.render_datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "pope_server_common.json.render_datetime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pope_signing_service.certificates.list_certificates", "line_number": 53, "usage_type": "call"}, {"api_name": "verktyg.views.expose_json", "line_number": 46, "usage_type": "call"}, {"api_name": "pope_server_common.auth.require_auth", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "pope_signing_service.schemata.create_certificate_request.validate", "line_number": 64, "usage_type": "call"}, {"api_name": "pope_signing_service.schemata.create_certificate_request", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pope_signing_service.schemata", "line_number": 64, "usage_type": "name"}, {"api_name": "pope_server_common.json.parse_bigint", "line_number": 66, "usage_type": "call"}, {"api_name": "pope_server_common.json.parse_bigint", "line_number": 67, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicNumbers", "line_number": 68, "usage_type": "call"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 70, "usage_type": "call"}, {"api_name": "pope_signing_service.certificates.create_certificate", "line_number": 72, "usage_type": "call"}, {"api_name": "verktyg.Response", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "verktyg.views.expose_json", "line_number": 57, "usage_type": "call"}, {"api_name": "pope_server_common.auth.require_auth", "line_number": 58, "usage_type": "name"}, {"api_name": "verktyg.views.expose_json", "line_number": 94, "usage_type": "call"}, {"api_name": "pope_server_common.auth.require_auth", "line_number": 95, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 102, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 102, "usage_type": "name"}, {"api_name": "pope_signing_service.certificates.get_certificate", "line_number": 105, "usage_type": "call"}, {"api_name": "pope_signing_service.exceptions.NotFoundError", "line_number": 108, "usage_type": "name"}, {"api_name": "verktyg.exceptions.NotFound", "line_number": 109, "usage_type": "call"}, {"api_name": "verktyg.exceptions", "line_number": 109, "usage_type": "name"}, {"api_name": "verktyg.Response", "line_number": 111, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 112, "usage_type": "call"}, {"api_name": "verktyg.views.expose_json", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "name"}, {"api_name": "pope_signing_service.certificates.get_certificate", "line_number": 131, "usage_type": "call"}, {"api_name": "pope_signing_service.exceptions.NotFoundError", 
"line_number": 134, "usage_type": "name"}, {"api_name": "verktyg.exceptions.NotFound", "line_number": 135, "usage_type": "call"}, {"api_name": "verktyg.exceptions", "line_number": 135, "usage_type": "name"}, {"api_name": "verktyg.Response", "line_number": 137, "usage_type": "call"}, {"api_name": "verktyg.views.expose", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "name"}, {"api_name": "pope_signing_service.certificates.get_certificate", "line_number": 151, "usage_type": "call"}, {"api_name": "pope_signing_service.exceptions.NotFoundError", "line_number": 154, "usage_type": "name"}, {"api_name": "verktyg.exceptions.NotFound", "line_number": 155, "usage_type": "call"}, {"api_name": "verktyg.exceptions", "line_number": 155, "usage_type": "name"}, {"api_name": "verktyg.views.expose_json", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "397627588", "text": "import os\nimport sys\nDIRNAME = os.path.dirname(__file__)\nsys.path.append(os.path.join(DIRNAME, '..'))\n\nimport mujoco_py as mujoco\n\nfrom src.constrainedChasingEscapingEnv.envMujoco import IsTerminal, TransitionFunction\nfrom src.constrainedChasingEscapingEnv.state import GetAgentPosFromState\nfrom src.neuralNetwork.policyValueNet import GenerateModel, restoreVariables, ApproximatePolicy\nfrom src.constrainedChasingEscapingEnv.envMujoco import ResetUniform\nfrom src.episode import SampleTrajectory, chooseGreedyAction\nfrom exec.trajectoriesSaveLoad import saveToPickle\n\ndef main():\n # transition function\n\n dirName = os.path.dirname(__file__)\n physicsDynamicsPath = os.path.join(dirName, '..', 'env', 'xmls', 'twoAgents.xml')\n agentsBodyMassIndex = [6, 7]\n physicsSmallMassModel = mujoco.load_model_from_path(physicsDynamicsPath)\n physicsSmallMassModel.body_mass[agentsBodyMassIndex] = [4, 5]\n physicsLargeMassModel = mujoco.load_model_from_path(physicsDynamicsPath)\n physicsLargeMassModel.body_mass[agentsBodyMassIndex] = [8, 10]\n physicsSmallMassSimulation = mujoco.MjSim(physicsSmallMassModel)\n physicsLargeMassSimulation = mujoco.MjSim(physicsLargeMassModel)\n # set_constants fit for mujoco_py version >= 2.0, no fit for 1.50\n physicsSmallMassSimulation.set_constants()\n physicsLargeMassSimulation.set_constants()\n\n sheepId = 0\n wolfId = 1\n xPosIndex = [2, 3]\n getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)\n getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex)\n killzoneRadius = 2\n isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos)\n\n numSimulationFrames = 20\n transitSmallMassAgents = TransitionFunction(physicsSmallMassSimulation, isTerminal, numSimulationFrames)\n transitLargeMassAgents = TransitionFunction(physicsLargeMassSimulation, isTerminal, numSimulationFrames)\n\n transit = transitSmallMassAgents\n\n\n # reset function\n qPosInit = (0, 0, 0, 0) # (initial position of sheep, initial position of wolf)\n qVelInit = (0, 0, 0, 0) # (initial velocity of sheep, initial velocity of wolf)\n qPosInitNoise = 9.7 # adds some randomness to the initial positions\n qVelInitNoise = 5 # adds some randomness to the initial velocities\n numAgent = 2\n reset = ResetUniform(physicsSmallMassSimulation, qPosInit, qVelInit, numAgent, qPosInitNoise, qVelInitNoise)\n\n # sample trajectory\n maxRunningSteps = 10 # max possible length of the trajectory/episode\n sampleTrajectory = SampleTrajectory(maxRunningSteps, transit, isTerminal, reset, 
chooseGreedyAction)\n\n # Neural Network\n actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)]\n numActionSpace = len(actionSpace)\n numStateSpace = 12\n regularizationFactor = 1e-4\n sharedWidths = [128]\n actionLayerWidths = [128]\n valueLayerWidths = [128]\n generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor)\n\n # wolf NN Policy\n wolfModelPath = os.path.join(dirName, '..','NNModels','wolfNNModels', 'killzoneRadius=0.5_maxRunningSteps=10_numSimulations=100_qPosInitNoise=9.7_qVelInitNoise=5_rolloutHeuristicWeight=0.1_trainSteps=99999')\n wolfNNModel = generateModel(sharedWidths, actionLayerWidths, valueLayerWidths)\n restoreVariables(wolfNNModel, wolfModelPath)\n approximateWolfPolicy = ApproximatePolicy(wolfNNModel, actionSpace)\n\n # sheep NN Policy\n sheepModelPath = os.path.join(dirName, '..','NNModels','sheepNNModels', 'killzoneRadius=2_maxRunningSteps=25_numSimulations=100_qPosInitNoise=9.7_qVelInitNoise=8_rolloutHeuristicWeight=0.1_trainSteps=99999')\n sheepNNModel = generateModel(sharedWidths, actionLayerWidths, valueLayerWidths)\n restoreVariables(sheepNNModel, sheepModelPath)\n approximateSheepPolicy = ApproximatePolicy(sheepNNModel, actionSpace)\n\n approximatePolicyList = [approximateSheepPolicy, approximateWolfPolicy]\n policy = lambda state: [{approximatePolicy(state): 1} for approximatePolicy in approximatePolicyList]\n\n trajectory = sampleTrajectory(policy)\n dataIndex = 11\n dataPath = os.path.join(dirName, '..', 'trainedData', 'trajectory'+ str(dataIndex) + '.pickle')\n saveToPickle(trajectory, dataPath)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "exec/sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 4294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "os.path.dirname", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mujoco_py.load_model_from_path", "line_number": 21, "usage_type": "call"}, {"api_name": "mujoco_py.load_model_from_path", "line_number": 23, "usage_type": "call"}, {"api_name": "mujoco_py.MjSim", "line_number": 25, "usage_type": "call"}, {"api_name": "mujoco_py.MjSim", "line_number": 26, "usage_type": "call"}, {"api_name": "src.constrainedChasingEscapingEnv.state.GetAgentPosFromState", "line_number": 34, "usage_type": "call"}, {"api_name": "src.constrainedChasingEscapingEnv.state.GetAgentPosFromState", "line_number": 35, "usage_type": "call"}, {"api_name": "src.constrainedChasingEscapingEnv.envMujoco.IsTerminal", "line_number": 37, "usage_type": "call"}, {"api_name": "src.constrainedChasingEscapingEnv.envMujoco.TransitionFunction", "line_number": 40, "usage_type": "call"}, {"api_name": "src.constrainedChasingEscapingEnv.envMujoco.TransitionFunction", "line_number": 41, "usage_type": "call"}, {"api_name": 
"src.constrainedChasingEscapingEnv.envMujoco.ResetUniform", "line_number": 52, "usage_type": "call"}, {"api_name": "src.episode.SampleTrajectory", "line_number": 56, "usage_type": "call"}, {"api_name": "src.episode.chooseGreedyAction", "line_number": 56, "usage_type": "argument"}, {"api_name": "src.neuralNetwork.policyValueNet.GenerateModel", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "src.neuralNetwork.policyValueNet.restoreVariables", "line_number": 71, "usage_type": "call"}, {"api_name": "src.neuralNetwork.policyValueNet.ApproximatePolicy", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "src.neuralNetwork.policyValueNet.restoreVariables", "line_number": 77, "usage_type": "call"}, {"api_name": "src.neuralNetwork.policyValueNet.ApproximatePolicy", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "exec.trajectoriesSaveLoad.saveToPickle", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "262917194", "text": "import os\nimport pandas as pd\nimport numpy as np\nfrom scipy import ndimage\nfrom scipy import misc\n\n\"\"\"I have downloaded the screencaps from user 409's game. This command changes\n#the directory so that we can access the image files.\"\"\"\nos.chdir('C:\\\\Users\\\\hkhan\\\\Documents\\\\CollinsLab\\\\Screens\\\\409screens')\n\"\"\"Let's make sure we're in the right place!\n#Right now this isn't generalized, it just works for the directory I saved pictures in.\n\"\"\"\nprint(\"Right now we're in \" + str(os.getcwd()) + \".\")\n\nclass Pic(object):\n def __init__(self, file, pic=None, shape=[], colors=[], qbertloc=[]):\n self.file = file\n self.pic = pic\n self.shape = shape\n self.colors = colors\n self.qbertloc = qbertloc\n\n \"\"\"These methods update the instance attributes of the Pic object:\n - SciPy image object\n - color list.\n - Qbert's color locations\n \"\"\"\n def choosepic(self, file):\n newpic = misc.imread(file)\n self.file = file\n self.pic = newpic\n self.shape = newpic.shape\n self.colors = self.colorlist()\n self.qbertloc = self.colorlocs()\n\n \"\"\"A list of the discinct colorways in a photo\"\"\"\n def colorlist(self):\n color = []\n for j in range(self.pic.shape[0] - 1):\n for i in range(self.pic.shape[1] - 1):\n if [self.pic[j][i][0],self.pic[j][i][1],self.pic[j][i][2]] not in color:\n color.append([self.pic[j][i][0],self.pic[j][i][1],self.pic[j][i][2]])\n return color\n\n def colorlocs(self):\n \"\"\"Identify QBERT's locations\"\"\"\n colorlocs = []\n for j in range(self.pic.shape[0] - 1):\n for i in range(self.pic.shape[1] - 1):\n if self.pic[j,i][0]==181 and self.pic[j,i][1]==83 and self.pic[j,i][2]==40:\n colorlocs.append([j,i])\n return colorlocs\n\n def snakelocs(self):\n \"\"\"Identify snake's locations\"\"\"\n colorlocs = []\n for j in range(self.pic.shape[0] - 1):\n for i in range(self.pic.shape[1] - 1):\n if self.pic[j,i][0]==146 and self.pic[j,i][1]==70 and self.pic[j,i][2]==192:\n colorlocs.append([j,i])\n return colorlocs\n\n def monsterlocs(self):\n \"\"\"Identify monster's locations\"\"\"\n colorlocs = []\n li = [[138, 12], [138, 13], [138, 14], [138, 15], [138, 16], [138, 17], [138, 18], 
[138, 19], [138, 140], [138, 141], [138, 142], [138, 143], [138, 144], [138, 145], [138, 146], [138, 147], [139, 12], [139, 13], [139, 14], [139, 15], [139, 16], [139, 17], [139, 18], [139, 19], [139, 140], [139, 141], [139, 142], [139, 143], [139, 144], [139, 145], [139, 146], [139, 147]]\n        for j in range(self.pic.shape[0] - 1):\n            for i in range(self.pic.shape[1] - 1):\n                if self.pic[j,i][0]==50 and self.pic[j,i][1]==132 and self.pic[j,i][2]==50:\n                    if [j,i] not in li:\n                        colorlocs.append([j,i])\n        return colorlocs\n\n    def qbertmean(self):\n        \"\"\"QBERT's mean location\"\"\"\n        if self.isqbertin():\n            arr = np.array(self.qbertloc)\n            return [np.mean(arr[:,0]), np.mean(arr[:,1])]\n        else:\n            return None\n\n    def snakemean(self):\n        \"\"\"Snake's mean location\"\"\"\n        if self.issnakein():\n            arr = np.array(self.snakelocs())\n            return [np.mean(arr[:,0]), np.mean(arr[:,1])]\n        else:\n            return None\n\n    def monstermean(self):\n        \"\"\"Monster's mean location\"\"\"\n        if self.ismonsterin():\n            arr = np.array(self.monsterlocs())\n            return [np.mean(arr[:,0]), np.mean(arr[:,1])]\n        else:\n            return None\n\n    \"\"\"These methods describe the image:\n    - isqbertin() tells if QBert is in the image\n    - issnakein() tells if the snake is in the image\n    - ismonsterin() tells if the monster is in the image\n    \"\"\"\n    def isqbertin(self):\n        yes = False\n        for j in range(self.pic.shape[0] - 1):\n            for i in range(self.pic.shape[1] - 1):\n                if self.pic[j,i][0]==181 and self.pic[j,i][1]==83 and self.pic[j,i][2]==40:\n                    yes = True\n        return yes\n\n    def issnakein(self):\n        yes = False\n        for j in range(self.pic.shape[0] - 1):\n            for i in range(self.pic.shape[1] - 1):\n                if self.pic[j,i][0]==146 and self.pic[j,i][1]==70 and self.pic[j,i][2]==192:\n                    yes = True\n        return yes\n\n    def ismonsterin(self):\n        li = [[138, 12], [138, 13], [138, 14], [138, 15], [138, 16], [138, 17], [138, 18], [138, 19], [138, 140], [138, 141], [138, 142], [138, 143], [138, 144], [138, 145], [138, 146], [138, 147], [139, 12], [139, 13], [139, 14], [139, 15], [139, 16], [139, 17], [139, 18], [139, 19], [139, 140], [139, 141], [139, 142], [139, 143], [139, 144], [139, 145], [139, 146], [139, 147]]\n        yes = False\n        for j in range(self.pic.shape[0] - 1):\n            for i in range(self.pic.shape[1] - 1):\n                if self.pic[j,i][0]==50 and self.pic[j,i][1]==132 and self.pic[j,i][2]==50:\n                    if [j,i] not in li:\n                        yes = True\n        return yes\n\n    def lives(self):\n        locs = []\n        for j in range(self.pic.shape[0] - 1):\n            for i in range(self.pic.shape[1] - 1):\n                if self.pic[j,i][0]==210 and self.pic[j,i][1]==210 and self.pic[j,i][2]==64:\n                    locs.append([j,i])\n        return locs\n\n    \"\"\"This method is a text description of the Pic object.\"\"\"\n    def describe(self):\n        print(\"We are looking at \" + str(self.file) + \".\")\n        print(\"There are \" + str(self.pic.shape[0]) + \" rows, \" + str(self.pic.shape[1]) + \" columns.\")\n        print(\"The colors are \" + str(self.colors) + \" .\")\n        inclusion = \"\"\n        inclusion2 = \"\"\n        inclusion3 = \"\"\n        qbert_in = self.isqbertin()\n        if not qbert_in:\n            inclusion = \"not \"\n        print(\"QBert is \" + inclusion + \"in the picture.\")\n        snake_in = self.issnakein()\n        if not snake_in:\n            inclusion2 = \"not \"\n        print(\"The snake is \" + inclusion2 + \"in the picture.\")\n        monster_in = self.ismonsterin()\n        if not monster_in:\n            inclusion3 = \"not \"\n        print(\"The monster is \" + inclusion3 + \"in the picture.\")\n\n\"\"\"Here we construct a default Pic object for convenience, called p.\nThis object can be used as a variable updated to whichever photo you want to look at.\nCall p.describe() in command 
prompt for more information.\n\"\"\"\np = Pic(\"0.png\", None, \"\", [], [])\np.choosepic(p.file)\n\nscore_locs = [\n[6, 35],\n[6, 36],\n[6, 37],\n[6, 43],\n[6, 44],\n[6, 45],\n[6, 53],\n[6, 58],\n[6, 59],\n[6, 60],\n[6, 61],\n[6, 62],\n[6, 66],\n[6, 67],\n[6, 68],\n[6, 69],\n[6, 70],\n[7, 34],\n[7, 38],\n[7, 42],\n[7, 46],\n[7, 52],\n[7, 53],\n[7, 62],\n[7, 66],\n[8, 34],\n[8, 38],\n[8, 42],\n[8, 46],\n[8, 51],\n[8, 53],\n[8, 61],\n[8, 66],\n[8, 67],\n[8, 68],\n[8, 69],\n[9, 34],\n[9, 38],\n[9, 42],\n[9, 46],\n[9, 50],\n[9, 53],\n[9, 60],\n[9, 70],\n[10, 34],\n[10, 38],\n[10, 42],\n[10, 46],\n[10, 50],\n[10, 51],\n[10, 52],\n[10, 53],\n[10, 54],\n[10, 59],\n[10, 70],\n[11, 34],\n[11, 38],\n[11, 42],\n[11, 46],\n[11, 53],\n[11, 59],\n[11, 66],\n[11, 70],\n[12, 35],\n[12, 36],\n[12, 37],\n[12, 43],\n[12, 44],\n[12, 45],\n[12, 53],\n[12, 59],\n[12, 67],\n[12, 68],\n[12, 69]]\n", "sub_path": "Scripts/PicObj.py", "file_name": "PicObj.py", "file_ext": "py", "file_size_in_byte": 7352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "86", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "313648092", "text": "from flask import render_template, flash, redirect, url_for, request\nfrom app import app, connector\nimport app.db.connection_methods as cm\n\n\ndispatch_table = {\n 'Aktorzy z filmu': cm.get_actors_from_movie,\n 'Czas trwania': cm.get_movies_by_min_runtime,\n 'Filmy danego reżysera': cm.get_movies_by_director,\n 'Filmy z aktorem': cm.get_movies_with_actor,\n 'Filmy z aktorem w gatunku': cm.get_movies_with_actor_in_genre,\n 'Gatunek': cm.get_movies_by_genre,\n 'Rating': cm.get_movies_by_min_rating,\n 'Rok produkcji': cm.get_movies_by_min_year,\n 'Wszyscy aktorzy': cm.get_actors,\n 'Wszyscy reżyserzy': cm.get_directors,\n 'Wszystkie filmy': cm.get_movies,\n 'Wszystkie gatunki': cm.get_genres,\n 'Wszystkie wytwórnie filmowe': cm.get_production_companies,\n}\n\n\napp.jinja_env.globals.update(get_menu_items=lambda : sorted(dispatch_table.keys()))\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html')\n\n\n@app.route('/result', methods=['GET', 'POST'])\ndef result():\n form_data = list(request.form.values())\n\n if form_data:\n args = [float(arg) if arg.isdigit() else arg for arg in form_data[:-1]]\n action = form_data[-1]\n data = connector.make_request(dispatch_table[action], *args).values()\n else:\n flash('nie wybrano żadnej akcji')\n return redirect(url_for('home'))\n \n if not data:\n flash('nie znaleziono węzłów o podanych parametrach')\n return redirect(url_for('home'))\n\n return render_template('result.html', data=data)", "sub_path": "app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": 
"app.db.connection_methods.get_actors_from_movie", "line_number": 7, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 7, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_by_min_runtime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 8, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_by_director", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 9, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_with_actor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 10, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_with_actor_in_genre", "line_number": 11, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 11, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_by_genre", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 12, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_by_min_rating", "line_number": 13, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 13, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies_by_min_year", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 14, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_actors", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 15, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_directors", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 16, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_movies", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 17, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_genres", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 18, "usage_type": "name"}, {"api_name": "app.db.connection_methods.get_production_companies", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.db.connection_methods", "line_number": 19, "usage_type": "name"}, {"api_name": "app.app.jinja_env.globals.update", "line_number": 23, "usage_type": "call"}, {"api_name": "app.app.jinja_env", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 26, "usage_type": "call"}, {"api_name": "app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 27, "usage_type": "call"}, {"api_name": "app.app", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form.values", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "app.connector.make_request", "line_number": 39, "usage_type": "call"}, {"api_name": "app.connector", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 41, "usage_type": "call"}, 
{"api_name": "flask.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 32, "usage_type": "call"}, {"api_name": "app.app", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "283050264", "text": "from gtts import gTTS\nimport speech_recognition as sr\nfrom subprocess import call # MAC / LINUX\n#from playsound import playsound # WINDOWS\nfrom requests import get\nfrom bs4 import BeautifulSoup\nimport webbrowser as browser\nimport json\n\n\n##### CONFIGURAÇÕES #####\nhotword = 'rose'\n\nwith open('rosie-python-assistente-fe02a8d39c53.json') as credenciais_google:\n credenciais_google = credenciais_google.read()\n\n\n##### FUNÇÕES PRINCIPAIS #####\n\ndef monitora_audio():\n microfone = sr.Recognizer()\n with sr.Microphone() as source:\n while True:\n print(\"Aguardando o Comando: \")\n audio = microfone.listen(source)\n try:\n trigger = microfone.recognize_google_cloud(audio, credentials_json=credenciais_google, language='pt-BR')\n trigger = trigger.lower()\n\n if hotword in trigger:\n print('COMANDO: ', trigger)\n responde('feedback')\n executa_comandos(trigger)\n break\n\n except sr.UnknownValueError:\n print(\"Google not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Cloud Speech service; {0}\".format(e))\n return trigger\n\ndef responde(arquivo):\n call(['afplay', 'audios/' + arquivo + '.mp3'])\n\ndef cria_audio(mensagem):\n tts = gTTS(mensagem, lang='pt-br')\n tts.save('audios/mensagem.mp3')\n print('ROSIE: ', mensagem)\n call(['afplay', 'audios/mensagem.mp3']) # OSX\n\n\ndef executa_comandos(trigger):\n if 'notícias' in trigger:\n ultimas_noticias()\n\n elif 'toca' in trigger and 'bee gees' in trigger:\n playlists('bee_gees')\n\n elif 'toca' in trigger and 'taylor davis' in trigger:\n playlists('taylor_davis')\n\n elif 'tempo agora' in trigger:\n previsao_tempo(tempo=True)\n\n elif 'temperatura hoje' in trigger:\n previsao_tempo(minmax=True)\n\n\n else:\n mensagem = trigger.strip(hotword)\n cria_audio(mensagem)\n print('C. 
INVÁLIDO', mensagem)\r\n        responde('comando_invalido')\r\n\r\n\r\n##### COMMAND FUNCTIONS #####\r\n\r\ndef ultimas_noticias():\r\n    site = get('https://news.google.com/news/rss?ned=pt_br&gl=BR&hl=pt')\r\n    noticias = BeautifulSoup(site.text, 'html.parser')\r\n    for item in noticias.findAll('item')[:2]:\r\n        mensagem = item.title.text\r\n        cria_audio(mensagem)\r\n\r\ndef playlists(album):\r\n    if album == 'bee_gees':\r\n        browser.open('https://open.spotify.com/track/33ALuUDfftTs2NEszyvJRm')\r\n    elif album == 'taylor_davis':\r\n        browser.open('https://open.spotify.com/track/3MKep4BfEwSlAHuFJrA9aV')\r\n\r\ndef previsao_tempo(tempo=False, minmax=False):\r\n    site = get('http://api.openweathermap.org/data/2.5/weather?id=3451190&APPID=6ca200b2fb7b3a692e8bbad3f663bd85&units=metric&lang=pt')\r\n    clima = site.json()\r\n    #print(json.dumps(clima, indent=4))\r\n    temperatura = clima['main']['temp']\r\n    minima = clima['main']['temp_min']\r\n    maxima = clima['main']['temp_max']\r\n    descricao = clima['weather'][0]['description']\r\n    if tempo:\r\n        mensagem = f'No momento fazem {temperatura} graus com: {descricao}'\r\n    if minmax:\r\n        mensagem = f'Mínima de {minima} e máxima de {maxima}'\r\n    cria_audio(mensagem)\r\n\r\ndef main():\r\n    while True:\r\n        monitora_audio()\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n", "sub_path": "learn/alura/python/assistente/rosie.py", "file_name": "rosie.py", "file_ext": "py", "file_size_in_byte": 3327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 21, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 22, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 36, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 38, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 43, "usage_type": "call"}, {"api_name": "gtts.gTTS", "line_number": 46, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 80, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 87, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 89, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "368277106", "text": "# The jankiest simulator for our janky debugger.\n#\n# Designed for tracing firmware execution, and untangling code flow while\n# talking to actual in-system hardware. The ARM CPU runs (very slowly) in\n# simulation in Python, proxying its I/O over a debug port. We need a debug\n# port that owns its own main loop, so this doesn't work with the default\n# SCSI/USB port, it requires the hardware hack %bitbang interface.\n#\n# CPU state is implemented as a flat regs[] array, with a few special indices.\n# This can be small because it doesn't understand machine code, just assembly\n# language. 
This is really more of an ARM assembly interpreter than a CPU\n# simulator, but it'll be close enough.\n\n__all__ = [ 'simulate_arm' ]\n\nimport cStringIO, struct, json\nfrom code import *\nfrom dump import *\n\n\ndef simulate_arm(device):\n \"\"\"Create a new ARM simulator, backed by the provided remote device\n Returns a SimARM object with regs[], memory, and step().\n \"\"\"\n m = SimARMMemory(device)\n\n m.skip(0x04001000, \"Reset control?\")\n m.skip(0x04002088, \"LED / Solenoid GPIOs, breaks bitbang backdoor\")\n m.skip(0x04030f04, \"Memory region control flags\")\n m.skip(0x04030f20, \"DRAM memory region, contains backdoor code\")\n m.skip(0x04030f24, \"DRAM memory region, contains backdoor code\")\n m.skip(0x04030f40, \"Stack memory region\")\n m.skip(0x04030f44, \"Stack memory region\")\n \n # Stub out a loop during init that seems to be spinning with a register read inside (performance)\n m.patch(0x0007bc3c, 'nop; nop')\n\n # Stub out encrypted functions related to DRM. Hopefully we don't need to bother supporting them.\n m.patch(0x00011080, 'mov r0, #0; bx lr', thumb=False)\n\n # Try keeping some of the RAM local (performance)\n m.local_ram(0x1c00000, 0x1c1ffff)\n m.local_ram(0x1f57000, 0x1ffffff)\n m.local_ram(0x2000600, 0x2000fff)\n\n return SimARM(m)\n\n\nclass RunEncoder(object):\n \"\"\"Find runs that write the same pattern to consecutive addresses.\n Both 'write' and 'flush' return a (count, address, pattern, size) tuple,\n where 'count' might be zero if no work needs to be done.\n \"\"\"\n def __init__(self):\n self.count = 0\n self.key = (None, None, None)\n\n def write(self, address, pattern, size):\n if self.key == (address - self.count * size, pattern, size):\n self.count += 1\n return (0, None, None, None)\n else:\n r = self.flush()\n self.key = (address, pattern, size)\n self.count = 1\n return r\n\n def flush(self):\n r = (self.count,) + self.key\n self.count = 0\n self.key = (None, None, None)\n return r\n\n\ndef asr(a, b):\n \"\"\"Arithmetic shift right\n Returns (result, carry)\n \"\"\"\n if a & 0x80000000: a |= 0xffffffff00000000\n return (a >> b, 1 & (a >> (b-1)))\n\ndef ror(a, b):\n \"\"\"Rotate right\n Returns (result, carry)\n \"\"\"\n return ( (a >> b) | ((a << (32 - b)) & 0xffffffff), 1 & (a >> (b-1)) )\n\ndef rol(a, b):\n \"\"\"Rotate left\n Returns (result, carry)\n \"\"\"\n return ( (a >> (32 - b)) | ((a << b) & 0xffffffff), 1 & (a >> (31 - b)) )\n\n\nclass SimARMMemory(object):\n \"\"\"Memory manager for a simulated ARM core, backed by a remote device.\n \n This manages a tiny bit of caching and write consolidation, to conserve\n bandwidth on the bitbang debug pipe.\n \"\"\"\n def __init__(self, device, logfile=None):\n self.device = device\n self.logfile = logfile\n\n # Instruction cache\n self.instructions = {}\n\n # Special addresses\n self.skip_stores = {}\n self.patch_notes = {}\n\n # Local RAM and cached flash, reads and writes don't go to hardware\n self.local_addresses = cStringIO.StringIO()\n self.local_data = cStringIO.StringIO()\n\n # Detect fills\n self.rle = RunEncoder()\n\n def skip(self, address, reason):\n self.skip_stores[address] = reason\n\n def patch(self, address, code, thumb = True):\n # Note the extra nop to facilitate the way load_assembly sizes instructions\n s = assemble_string(address, code + '\\nnop', thumb=thumb)\n lines = disassembly_lines(disassemble_string(s, address=address, thumb=thumb))\n self._load_assembly(address, lines, thumb=thumb)\n for l in lines[:1]:\n self.patch_notes[l.address] = True\n\n def 
save_state(self, filebase):\n        \"\"\"Save state to disk, using files beginning with 'filebase'\"\"\"\n        with open(filebase + '.addr', 'wb') as f:\n            f.write(self.local_addresses.getvalue())\n        with open(filebase + '.data', 'wb') as f:\n            f.write(self.local_data.getvalue())\n\n    def load_state(self, filebase):\n        \"\"\"Load state from save_state()\"\"\"\n        with open(filebase + '.addr', 'rb') as f:\n            self.local_addresses = cStringIO.StringIO()\n            self.local_addresses.write(f.read())\n        with open(filebase + '.data', 'rb') as f:\n            self.local_data = cStringIO.StringIO()\n            self.local_data.write(f.read())\n\n    def local_ram(self, begin, end):\n        self.local_addresses.seek(begin)\n        self.local_addresses.write('\\xff' * (end - begin + 1))\n\n    def note(self, address):\n        if self.patch_notes.get(address):\n            return 'PATCH'\n        return ''\n\n    def log_store(self, address, data, size='word', message=''):\n        if self.logfile:\n            self.logfile.write(\"arm-mem-STORE %4s[%08x] <- %08x %s\\n\" % (size, address, data, message))\n\n    def log_fill(self, address, pattern, count, size='word'):\n        if self.logfile:\n            self.logfile.write(\"arm-mem-FILL %4s[%08x] <- %08x * %04x\\n\" % (size, address, pattern, count))\n\n    def log_load(self, address, data, size='word'):\n        if self.logfile:\n            self.logfile.write(\"arm-mem-LOAD %4s[%08x] -> %08x\\n\" % (size, address, data))\n\n    def log_prefetch(self, address):\n        if self.logfile:\n            self.logfile.write(\"arm-prefetch [%08x]\\n\" % address)\n\n    def check_address(self, address):\n        # Called before write (crash less) and after read (curiosity)\n        if address >= 0x05000000:\n            raise IndexError(\"Address %08x doesn't look valid. Simulator bug?\" % address)\n\n    def post_rle_store(self, count, address, pattern, size):\n        \"\"\"Process stores after RLE consolidation has happened\"\"\"\n\n        if count > 1 and size == 4:\n            self.check_address(address)\n            self.log_fill(address, pattern, count)\n            self.device.fill_words(address, pattern, count)\n            return\n\n        if count > 1 and size == 1:\n            self.check_address(address)\n            self.log_fill(address, pattern, count, 'byte')\n            self.device.fill_bytes(address, pattern, count)\n            return\n\n        while count > 0:\n            self.check_address(address)\n\n            if size == 4:\n                self.log_store(address, pattern)\n                self.device.poke(address, pattern)\n\n            elif size == 2:\n                self.log_store(address, pattern, 'half')\n                self.device.poke_byte(address, pattern & 0xff)\n                self.device.poke_byte(address + 1, pattern >> 8)\n\n            else:\n                assert size == 1\n                self.log_store(address, pattern, 'byte')\n                self.device.poke_byte(address, pattern)\n\n            count -= 1\n            address += size\n\n    def flush(self):\n        # If there's a cached fill, make it happen\n        self.post_rle_store(*self.rle.flush())\n\n    def fetch_local_data(self, address, size, max_round_trips = None):\n        \"\"\"Immediately read a block of data from the remote device into the local cache.\n        All cached bytes will stay in the cache permanently, and writes will no longer go to hardware.\n        Returns the length of the block we actually read, in bytes.\n        \"\"\"\n        block = read_block(self.device, address, size, max_round_trips=max_round_trips)\n        self.local_ram(address, address + len(block) - 1)\n        self.local_data.seek(address)\n        self.local_data.write(block)\n        return len(block)\n\n    def local_data_available(self, address, limit = 0x100):\n        \"\"\"How many bytes of local data are available at an address?\"\"\"\n        self.local_addresses.seek(address)\n        flags = self.local_addresses.read(limit)\n        return flags.find('\\x00') if '\\x00' in flags else len(flags)  # find() returns -1 when the whole window is cached\n\n    def flash_prefetch_hint(self, address):\n        \"\"\"We're accessing an address, if 
it's flash maybe prefetch around it.\n        Returns the number of bytes prefetched or the number of bytes already available.\n        Guaranteed to have at least 8 bytes available for flash addresses.\n        \"\"\"\n        # Flash prefetch, whatever we can get quickly\n        avail = self.local_data_available(address)\n        if address < 0x200000 and avail < 8:\n            self.flush()\n            self.log_prefetch(address)\n            avail = self.fetch_local_data(address, size=0x100, max_round_trips=1) \n        return avail\n\n    def load(self, address):\n        self.flash_prefetch_hint(address)\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(4) == '\\xff\\xff\\xff\\xff':\n            self.local_data.seek(address)\n            return struct.unpack('<I', self.local_data.read(4))[0]\n\n        # Uncached address: flush any pending stores, then read from the device\n        self.flush()\n        data = self.device.peek(address)\n        self.log_load(address, data)\n        self.check_address(address)\n        return data\n\n    def load_half(self, address):\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(2) == '\\xff\\xff':\n            self.local_data.seek(address)\n            return struct.unpack('<H', self.local_data.read(2))[0]\n        self.flush()\n        data = self.device.peek_byte(address) | (self.device.peek_byte(address + 1) << 8)\n        self.log_load(address, data, 'half')\n        return data\n\n    def load_byte(self, address):\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(1) == '\\xff':\n            self.local_data.seek(address)\n            return ord(self.local_data.read(1))\n        self.flush()\n        data = self.device.peek_byte(address)\n        self.log_load(address, data, 'byte')\n        return data\n\n    def store(self, address, data):\n        if address in self.skip_stores:\n            self.log_store(address, data, message='(skipped: %s)' % self.skip_stores[address])\n            return\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(4) == '\\xff\\xff\\xff\\xff':\n            self.local_data.seek(address)\n            self.local_data.write(struct.pack('<I', data))\n            return\n        self.post_rle_store(*self.rle.write(address, data, 4))\n\n    def store_half(self, address, data):\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(2) == '\\xff\\xff':\n            self.local_data.seek(address)\n            self.local_data.write(struct.pack('<H', data))\n            return\n        self.post_rle_store(*self.rle.write(address, data, 2))\n\n    def store_byte(self, address, data):\n        self.local_addresses.seek(address)\n        if self.local_addresses.read(1) == '\\xff':\n            self.local_data.seek(address)\n            self.local_data.write(chr(data & 0xff))\n            return\n        self.post_rle_store(*self.rle.write(address, data, 1))\n\n    def fetch(self, address, thumb):\n        \"\"\"Fetch a disassembled instruction, loading a block on cache miss\"\"\"\n        addr = thumb | (address & ~1)\n        if addr in self.instructions:\n            return self.instructions[addr]\n        block_size = self.flash_prefetch_hint(address)\n        assert block_size >= 8\n        self.local_data.seek(address)\n        data = self.local_data.read(block_size)\n        lines = disassembly_lines(disassemble_string(data, address, thumb=thumb))\n        self._load_assembly(address, lines, thumb=thumb)\n        return self.instructions[addr]\n\n    def _load_assembly(self, address, lines, thumb):\n        for i in range(len(lines) - 1):\n            lines[i].next_address = lines[i+1].address\n            addr = thumb | (lines[i].address & ~1)\n            if addr not in self.instructions:\n                self.instructions[addr] = lines[i]\n\n\nclass SimARM(object):\n    \"\"\"Main simulator class for the ARM subset we support in %sim\n\n    Registers are available at regs[], call step() to single-step. Uses the\n    provided memory manager object to handle load(), store(), and fetch().\n\n    The lightweight CPU state is available as a dictionary property 'state'.\n    Full local state including local memory can be stored with save_state().\n    \"\"\"\n    def __init__(self, memory):\n        self.memory = memory\n        self.reset(0)\n\n        # Register lookup\n        self.reg_names = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', \n                          'r8', 'r9', 'r10', 'r11', 'r12', 'sp', 'lr', 'pc')\n        self.alt_names = ('a1', 'a2', 'a3', 'a4', 'v1', 'v2', 'v3', 'v4',\n                          'v5', 'sb', 'sl', 'fp', 'ip', 'r13', 'r14', 'r15')\n        self.reg_numbers = {}\n        for i, name in enumerate(self.reg_names): self.reg_numbers[name] = i\n        for i, name in enumerate(self.alt_names): self.reg_numbers[name] = i\n\n        # Initialize ldm/stm variants\n        for memop in ('ld', 'st'):\n            for mode in ('', 'ia', 'ib', 'da', 'db', 'fd', 'fa', 'ed', 'ea'):\n                self._generate_ldstm(memop, mode)\n\n        # Initialize condition codes\n        for name in self.__class__.__dict__.keys():\n            if name.startswith('op_'):\n                self._generate_condition_codes(getattr(self, name), name + '%s')\n\n        # Near branches\n        self.__dict__['op_b.n'] = self.op_b\n        self._generate_condition_codes(self.op_b, 'op_b%s.n')\n\n    def reset(self, vector):\n        self.regs = [0] * 16\n        self.thumb = vector & 1\n        self.cpsrV = False\n        self.cpsrC = False\n        self.cpsrZ = False\n        self.cpsrN = False\n        self.regs[15] = vector & 0xfffffffe\n        self.regs[14] = 0xffffffff\n        self.step_count = 0\n\n    _state_fields = ('regs', 'thumb', 'cpsrV', 'cpsrC', 'cpsrZ', 'cpsrN', 'step_count')\n\n    @property\n    def state(self):\n        d = {}\n        for name in self._state_fields:\n            d[name] = getattr(self, name)\n        return d\n\n    @state.setter\n    def state(self, value):\n        for name in self._state_fields:\n            setattr(self, name, value[name])\n\n    def save_state(self, filebase):\n        \"\"\"Save state to disk, using files beginning with 'filebase'\"\"\"\n        self.memory.save_state(filebase)\n        with open(filebase + '.core', 'w') as f:\n            json.dump(self.state, f)\n\n    def load_state(self, filebase):\n        \"\"\"Load state from save_state()\"\"\"\n        self.memory.load_state(filebase)\n        with open(filebase + '.core', 'r') as f:\n            self.state = json.load(f)\n\n    def step(self):\n        \"\"\"Step the simulated ARM by one instruction\"\"\"\n        self.step_count 
+= 1\n        regs = self.regs\n        instr = self.memory.fetch(regs[15], self.thumb)\n        self._branch = None\n\n        if self.thumb:\n            regs[15] = (instr.next_address + 3) & ~3\n        else:\n            regs[15] += 8\n\n        try:\n            getattr(self, 'op_' + instr.op)(instr)\n            regs[15] = self._branch or instr.next_address\n        except:\n            regs[15] = instr.address\n            raise\n\n    def get_next_instruction(self):\n        return self.memory.fetch(self.regs[15], self.thumb)\n\n    def flags_string(self):\n        return ''.join([\n            '-N'[self.cpsrN],\n            '-Z'[self.cpsrZ],\n            '-C'[self.cpsrC],\n            '-V'[self.cpsrV],\n            '-T'[self.thumb],\n        ]) \n\n    def summary_line(self):\n        up_next = self.get_next_instruction()\n        return \"%s %s >%08x %5s %-8s %s\" % (\n            str(self.step_count).rjust(8, '.'),\n            self.flags_string(),\n            up_next.address,\n            self.memory.note(up_next.address),\n            up_next.op,\n            up_next.args)\n\n    def register_trace(self):\n        parts = []\n        for y in range(4):\n            for x in range(4):\n                i = y + x*4\n                parts.append('%4s=%08x' % (self.reg_names[i], self.regs[i]))\n            parts.append('\\n')\n        return ''.join(parts)\n\n    def register_trace_line(self, count=15):\n        return ' '.join('%s=%08x' % (self.reg_names[i], self.regs[i]) for i in range(count))\n\n    def copy_registers_from(self, ns):\n        self.regs = [ns.get(n, 0) for n in self.reg_names]\n\n    def copy_registers_to(self, ns):\n        for i, name in enumerate(self.reg_names):\n            ns[name] = self.regs[i]\n\n    def _generate_ldstm(self, memop, mode):\n        impl = mode or 'ia'\n\n        # Stack mnemonics\n        if memop == 'st' and mode =='fd': impl = 'db'\n        if memop == 'st' and mode =='fa': impl = 'ib'\n        if memop == 'st' and mode =='ed': impl = 'da'\n        if memop == 'st' and mode =='ea': impl = 'ia'\n        if memop == 'ld' and mode =='fd': impl = 'ia'\n        if memop == 'ld' and mode =='fa': impl = 'da'\n        if memop == 'ld' and mode =='ed': impl = 'ib'\n        if memop == 'ld' and mode =='ea': impl = 'db'\n\n        def fn(i):\n            left, right = i.args.split(', ', 1)\n            assert right[0] == '{'\n            writeback = left.endswith('!')\n            left = self.reg_numbers[left.strip('!')]\n            addr = self.regs[left] \n            \n            for r in right.strip('{}').split(', '):\n                if impl == 'ib': addr += 4\n                if impl == 'db': addr -= 4\n                if memop == 'st':\n                    self.memory.store(addr, self.regs[self.reg_numbers[r]])\n                else:\n                    self._dstpc(r, self.memory.load(addr))\n                if impl == 'ia': addr += 4\n                if impl == 'da': addr -= 4\n            \n            if writeback:\n                self.regs[left] = addr\n\n        setattr(self, 'op_' + memop + 'm' + mode, fn)\n        self._generate_condition_codes(fn, 'op_' + memop + 'm%s' + mode)\n        self._generate_condition_codes(fn, 'op_' + memop + 'm' + mode + '%s')\n\n    def _generate_condition_codes(self, fn, name):\n        setattr(self, name % 'eq', lambda i, fn=fn: (self.cpsrZ ) and fn(i))\n        setattr(self, name % 'ne', lambda i, fn=fn: (not self.cpsrZ ) and fn(i))\n        setattr(self, name % 'cs', lambda i, fn=fn: (self.cpsrC ) and fn(i))\n        setattr(self, name % 'hs', lambda i, fn=fn: (self.cpsrC ) and fn(i))\n        setattr(self, name % 'cc', lambda i, fn=fn: (not self.cpsrC ) and fn(i))\n        setattr(self, name % 'lo', lambda i, fn=fn: (not self.cpsrC ) and fn(i))\n        setattr(self, name % 'mi', lambda i, fn=fn: (self.cpsrN ) and fn(i))\n        setattr(self, name % 'pl', lambda i, fn=fn: (not self.cpsrN ) and fn(i))\n        setattr(self, name % 'vs', lambda i, fn=fn: (self.cpsrV ) and fn(i))\n        setattr(self, name % 'vc', lambda i, fn=fn: (not self.cpsrV ) and fn(i))\n        setattr(self, name % 'hi', lambda i, fn=fn: (self.cpsrC and not self.cpsrZ ) and fn(i))\n        setattr(self, name % 'ls', lambda i, fn=fn: (self.cpsrZ or not self.cpsrC ) and fn(i))\n
        setattr(self, name % 'ge', lambda i, fn=fn: (((not not self.cpsrN) == (not not self.cpsrV)) ) and fn(i))\n        setattr(self, name % 'lt', lambda i, fn=fn: (((not not self.cpsrN) != (not not self.cpsrV)) ) and fn(i))\n        setattr(self, name % 'gt', lambda i, fn=fn: (((not not self.cpsrN) == (not not self.cpsrV)) and not self.cpsrZ ) and fn(i))\n        setattr(self, name % 'le', lambda i, fn=fn: (((not not self.cpsrN) != (not not self.cpsrV)) or self.cpsrZ ) and fn(i))\n        setattr(self, name % 'al', fn)\n\n    def _reg_or_literal(self, s):\n        if s[0] == '#':\n            return int(s[1:], 0) & 0xffffffff\n        return self.regs[self.reg_numbers[s]]\n\n    def _shifter(self, s):\n        # Returns (result, carry)\n        l = s.split(', ', 1)\n        if len(l) == 2:\n\n            t = l[1].split(' ')\n            if len(t) == 1:\n                # Assumed ror, to match the encoding for 32-bit literals\n                r, c = ror(self._reg_or_literal(l[0]), (int(t[0], 0) & 0xffffffff))\n\n            else:\n                assert t[1][0] == '#'\n                op = t[0]\n                arg = int(t[1][1:], 0) & 0xffffffff\n                \n                if op == 'lsl':\n                    r = self._reg_or_literal(l[0]) << arg\n                    c = 1 & (r >> 32)\n                elif op == 'lsr':\n                    r = self._reg_or_literal(l[0]) >> arg\n                    c = 1 & (self._reg_or_literal(l[0]) >> (arg - 1))\n                elif op == 'asr':\n                    r, c = asr(self._reg_or_literal(l[0]), arg)\n                elif op == 'rol':\n                    r, c = rol(self._reg_or_literal(l[0]), arg)\n                elif op == 'ror':\n                    r, c = ror(self._reg_or_literal(l[0]), arg)\n\n            return (r & 0xffffffff, c)\n        return (self._reg_or_literal(s), 0)\n\n    def _reg_or_target(self, s):\n        if s in self.reg_numbers:\n            return self.regs[self.reg_numbers[s]]\n        else:\n            return int(s, 0)\n\n    def _reladdr(self, right):\n        assert right[0] == '['\n        if right[-1] == ']':\n            # [a, b]\n            addrs = right.strip('[]').split(', ')\n            v = self.regs[self.reg_numbers[addrs[0]]]\n            if len(addrs) > 1:\n                v = (v + self._reg_or_literal(addrs[1])) & 0xffffffff\n            return v\n        else:\n            # [a], b\n            addrs = right.split(', ')\n            v = self.regs[self.reg_numbers[addrs[0].strip('[]')]]\n            return (v + self._reg_or_literal(addrs[1])) & 0xffffffff\n\n    @staticmethod\n    def _3arg(i):\n        l = i.args.split(', ', 2)\n        if len(l) == 3:\n            return l\n        else:\n            return [l[0]] + l\n\n    def _dstpc(self, dst, r):\n        if dst == 'pc':\n            self._branch = r & 0xfffffffe\n            self.thumb = r & 1\n        else:\n            self.regs[self.reg_numbers[dst]] = r & 0xffffffff\n\n    def op_ldr(self, i):\n        left, right = i.args.split(', ', 1)\n        self._dstpc(left, self.memory.load(self._reladdr(right)))\n\n    def op_ldrh(self, i):\n        left, right = i.args.split(', ', 1)\n        self._dstpc(left, self.memory.load_half(self._reladdr(right)))\n\n    def op_ldrb(self, i):\n        left, right = i.args.split(', ', 1)\n        self._dstpc(left, self.memory.load_byte(self._reladdr(right)))\n\n    def op_str(self, i):\n        left, right = i.args.split(', ', 1)\n        self.memory.store(self._reladdr(right), self.regs[self.reg_numbers[left]])\n\n    def op_strh(self, i):\n        left, right = i.args.split(', ', 1)\n        self.memory.store_half(self._reladdr(right), 0xffff & self.regs[self.reg_numbers[left]])\n\n    def op_strb(self, i):\n        left, right = i.args.split(', ', 1)\n        self.memory.store_byte(self._reladdr(right), 0xff & self.regs[self.reg_numbers[left]])\n\n    def op_push(self, i):\n        reglist = i.args.strip('{}').split(', ')\n        sp = self.regs[13] - 4 * len(reglist)\n        self.regs[13] = sp\n        for i in range(len(reglist)):\n            self.memory.store(sp + 4 * i, self.regs[self.reg_numbers[reglist[i]]])\n\n    def op_pop(self, i):\n        reglist = i.args.strip('{}').split(', ')\n        sp = self.regs[13]\n        for i in range(len(reglist)):\n            self._dstpc(reglist[i], self.memory.load(sp + 4 * i))\n        self.regs[13] = sp + 4 * len(reglist)\n\n    def op_bx(self, i):\n        if i.args in self.reg_numbers:\n            r = 
self.regs[self.reg_numbers[i.args]]\n            self._branch = r & ~1\n            self.thumb = r & 1\n        else:\n            r = int(i.args, 0)\n            self._branch = r & ~1\n            self.thumb = not self.thumb\n\n    def op_bl(self, i):\n        self.regs[14] = i.next_address | self.thumb\n        self._branch = self._reg_or_target(i.args)\n\n    def op_blx(self, i):\n        self.regs[14] = i.next_address | self.thumb\n        if i.args in self.reg_numbers:\n            r = self.regs[self.reg_numbers[i.args]]\n            self._branch = r & 0xfffffffe\n            self.thumb = r & 1\n        else:\n            r = int(i.args, 0)\n            self._branch = r & 0xfffffffe\n            self.thumb = not self.thumb\n\n    def op_b(self, i):\n        self._branch = int(i.args, 0)\n\n    def op_nop(self, i):\n        pass\n\n    def op_mov(self, i):\n        dst, src = i.args.split(', ', 1)\n        s, _ = self._shifter(src)\n        self._dstpc(dst, s)\n\n    def op_movs(self, i):\n        dst, src = i.args.split(', ', 1)\n        r, self.cpsrC = self._shifter(src)\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n\n    def op_mvn(self, i):\n        dst, src = i.args.split(', ', 1)\n        r, _ = self._shifter(src)\n        self._dstpc(dst, r ^ 0xffffffff)\n\n    def op_mvns(self, i):\n        dst, src = i.args.split(', ', 1)\n        r, self.cpsrC = self._shifter(src)\n        r = r ^ 0xffffffff\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n\n    def op_bic(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] & ~s\n        self._dstpc(dst, r)\n\n    def op_bics(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] & ~s\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n\n    def op_orr(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] | s\n        self._dstpc(dst, r)\n\n    def op_orrs(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, self.cpsrC = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] | s\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n\n    def op_and(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] & s\n        self._dstpc(dst, r)\n\n    def op_ands(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, self.cpsrC = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] & s\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n\n    def op_tst(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, self.cpsrC = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] & s\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n\n    def op_eor(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] ^ s\n        self._dstpc(dst, r)\n\n    def op_eors(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, self.cpsrC = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] ^ s\n        self.cpsrZ = r == 0\n        self.cpsrN = (r >> 31) & 1\n        self._dstpc(dst, r)\n    \n    def op_add(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = self.regs[self.reg_numbers[src0]] + s\n        self._dstpc(dst, r)\n\n    def op_adds(self, i):\n        dst, src0, src1 = self._3arg(i)\n        a = self.regs[self.reg_numbers[src0]]\n        b, _ = self._shifter(src1)\n        r = a + b\n        self.cpsrN = (r >> 31) & 1\n        self.cpsrZ = not (r & 0xffffffff)\n        self.cpsrC = r > 0xffffffff\n        self.cpsrV = ((a >> 31) & 1) == ((b >> 31) & 1) and ((a >> 31) & 1) != ((r >> 31) & 1) and ((b >> 31) & 1) != ((r >> 31) & 1)\n        self._dstpc(dst, r)\n\n    def op_adc(self, i):\n        dst, src0, src1 = self._3arg(i)\n        s, _ = self._shifter(src1)\n        r = 
def op_adc(self, i):\n dst, src0, src1 = self._3arg(i)\n s, _ = self._shifter(src1)\n r = self.regs[self.reg_numbers[src0]] + s + (self.cpsrC & 1)\n self._dstpc(dst, r)\n\n def op_adcs(self, i):\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n b, _ = self._shifter(src1)\n r = a + b + (self.cpsrC & 1)\n self.cpsrN = (r >> 31) & 1\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrC = r > 0xffffffff\n self.cpsrV = ((a >> 31) & 1) == ((b >> 31) & 1) and ((a >> 31) & 1) != ((r >> 31) & 1) and ((b >> 31) & 1) != ((r >> 31) & 1)\n self._dstpc(dst, r)\n\n def op_sub(self, i):\n dst, src0, src1 = self._3arg(i)\n s, _ = self._shifter(src1)\n r = self.regs[self.reg_numbers[src0]] - s\n self._dstpc(dst, r)\n\n def op_subs(self, i):\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n b, _ = self._shifter(src1)\n r = a - b\n self.cpsrN = (r >> 31) & 1\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrC = (a & 0xffffffff) >= (b & 0xffffffff)\n self.cpsrV = ((a >> 31) & 1) != ((b >> 31) & 1) and ((a >> 31) & 1) != ((r >> 31) & 1)\n self._dstpc(dst, r)\n\n def op_rsb(self, i):\n dst, src0, src1 = self._3arg(i)\n s, _ = self._shifter(src1)\n r = s - self.regs[self.reg_numbers[src0]]\n self._dstpc(dst, r)\n\n def op_rsbs(self, i):\n dst, src0, src1 = self._3arg(i)\n b = self.regs[self.reg_numbers[src0]]\n a, _ = self._shifter(src1)\n r = a - b\n self.cpsrN = (r >> 31) & 1\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrC = (a & 0xffffffff) >= (b & 0xffffffff)\n self.cpsrV = ((a >> 31) & 1) != ((b >> 31) & 1) and ((a >> 31) & 1) != ((r >> 31) & 1)\n self._dstpc(dst, r)\n\n def op_cmp(self, i):\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n b, _ = self._shifter(src1)\n r = a - b\n self.cpsrN = (r >> 31) & 1\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrC = (a & 0xffffffff) >= (b & 0xffffffff)\n self.cpsrV = ((a >> 31) & 1) != ((b >> 31) & 1) and ((a >> 31) & 1) != ((r >> 31) & 1)\n\n def op_lsl(self, i):\n dst, src0, src1 = self._3arg(i)\n r = self.regs[self.reg_numbers[src0]] << self._reg_or_literal(src1)\n self._dstpc(dst, r)\n\n def op_lsls(self, i):\n dst, src0, src1 = self._3arg(i)\n r = self.regs[self.reg_numbers[src0]] << self._reg_or_literal(src1)\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrN = (r >> 31) & 1\n self.cpsrC = (r >> 32) & 1\n self._dstpc(dst, r)\n\n def op_lsr(self, i):\n dst, src0, src1 = self._3arg(i)\n r = self.regs[self.reg_numbers[src0]] >> self._reg_or_literal(src1)\n self._dstpc(dst, r)\n\n def op_lsrs(self, i):\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n n = self._reg_or_literal(src1)\n r = a >> n\n self.cpsrZ = not (r & 0xffffffff)\n self.cpsrN = (r >> 31) & 1\n # Carry is the last bit shifted out; bit 32 of a right-shifted value is always zero.\n self.cpsrC = 1 & (a >> (n - 1)) if n else self.cpsrC\n self._dstpc(dst, r)\n\n def op_asr(self, i):\n dst, src0, src1 = self._3arg(i)\n r, _ = asr(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self._dstpc(dst, r)\n\n def op_asrs(self, i):\n dst, src0, src1 = self._3arg(i)\n r, self.cpsrC = asr(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self.cpsrZ = r == 0\n self.cpsrN = (r >> 31) & 1\n self._dstpc(dst, r)\n\n def op_ror(self, i):\n dst, src0, src1 = self._3arg(i)\n r, _ = ror(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self._dstpc(dst, r)\n\n def op_rors(self, i):\n dst, src0, src1 = self._3arg(i)\n r, self.cpsrC = ror(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self.cpsrZ = r == 0\n self.cpsrN = (r >> 31) & 1\n self._dstpc(dst, r)\n\n
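# rol(), ror() and asr() are helpers defined earlier in this module; each is\n # assumed to return a (result, carry) pair, with carry holding the last bit\n # rotated or shifted out.\n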
def op_rol(self, i):\n dst, src0, src1 = self._3arg(i)\n r, _ = rol(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self._dstpc(dst, r)\n\n def op_rols(self, i):\n dst, src0, src1 = self._3arg(i)\n r, self.cpsrC = rol(self.regs[self.reg_numbers[src0]], self._reg_or_literal(src1))\n self.cpsrZ = r == 0\n self.cpsrN = (r >> 31) & 1\n self._dstpc(dst, r)\n\n def op_mul(self, i):\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n b, _ = self._shifter(src1)\n r = a * b\n self._dstpc(dst, r)\n\n def op_muls(self, i):\n # Only the S variant updates the flags.\n dst, src0, src1 = self._3arg(i)\n a = self.regs[self.reg_numbers[src0]]\n b, _ = self._shifter(src1)\n r = a * b\n self.cpsrN = (r >> 31) & 1\n self.cpsrZ = not (r & 0xffffffff)\n self._dstpc(dst, r)\n\n def op_msr(self, i):\n \"\"\"Stub\"\"\"\n dst, src = i.args.split(', ')\n\n def op_mrs(self, i):\n \"\"\"Stub\"\"\"\n dst, src = i.args.split(', ')\n self.regs[self.reg_numbers[dst]] = 0x5d5d5d5d\n\n def op_clz(self, i):\n # Count leading zeros, per the ARM clz instruction.\n dst, src = i.args.split(', ', 1)\n r, _ = self._shifter(src)\n n = 0\n while n < 32 and not (r & (0x80000000 >> n)):\n n += 1\n self._dstpc(dst, n)\n\n", "sub_path": "backdoor/sim_arm.py", "file_name": "sim_arm.py", "file_ext": "py", "file_size_in_byte": 33771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "64", "api": [{"api_name": "cStringIO.StringIO", "line_number": 113, "usage_type": "call"}, {"api_name": "cStringIO.StringIO", "line_number": 114, "usage_type": "call"}, {"api_name": "cStringIO.StringIO", "line_number": 140, "usage_type": "call"}, {"api_name": "cStringIO.StringIO", "line_number": 143, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 250, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 264, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 290, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 304, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 418, "usage_type": "call"}, {"api_name": "json.load", "line_number": 424, "usage_type": "call"}]} +{"seq_id": "635694599", "text": "\"\"\"\nModel analysis for attention experiments:\n\nUsage:\n experiment_128k.py [--basic]